def get_btchromas_loudness(h5):
    """
    Similar to btchroma, but adds the loudness back.
    We use the segments_loudness_max values.
    There is no max value constraint, simply no negative values.
    """
    # if string, open and get chromas, if h5, get chromas
    if type(h5).__name__ == "str":
        h5 = GETTERS.open_h5_file_read(h5)
        chromas = GETTERS.get_segments_pitches(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        loudnessmax = GETTERS.get_segments_loudness_max(h5)
        h5.close()
    else:
        chromas = GETTERS.get_segments_pitches(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        loudnessmax = GETTERS.get_segments_loudness_max(h5)
    # get the series of starts for segments and beats
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # add back loudness
    chromas = chromas.T * idB(loudnessmax)
    # aligned features
    btchroma = align_feats(chromas, segstarts, btstarts, duration)
    if btchroma is None:
        return None
    # done (no renormalization)
    return btchroma
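Every beat-aligned getter in this listing delegates the actual segment-to-beat mapping to align_feats, which is not shown here. A minimal sketch of what such a routine could look like, weighting each segment by its time overlap with each beat (names and details are assumptions, not the library's exact code):

def align_feats_sketch(feats, segstarts, btstarts, duration):
    """Hypothetical stand-in for align_feats: feats has one segment per column;
    the result has one beat per column."""
    # need at least a couple of segments and beats to do anything useful
    if feats.shape[1] < 2 or len(btstarts) < 2:
        return None
    seg_bounds = np.concatenate([segstarts, [duration]])
    bt_bounds = np.concatenate([btstarts, [duration]])
    btfeats = np.zeros((feats.shape[0], len(btstarts)))
    for b in range(len(btstarts)):
        b0, b1 = bt_bounds[b], bt_bounds[b + 1]
        weights = np.zeros(feats.shape[1])
        for s in range(feats.shape[1]):
            # overlap (in seconds) between segment s and beat b
            overlap = min(seg_bounds[s + 1], b1) - max(seg_bounds[s], b0)
            weights[s] = max(overlap, 0.)
        if weights.sum() > 0:
            weights /= weights.sum()
        btfeats[:, b] = feats.dot(weights)
    return btfeats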
def get_bttimbre(h5):
    """
    Get beat-aligned timbre from a song file of the Million Song Dataset
    INPUT:
       h5          - filename or open h5 file
    RETURN:
       bttimbre    - beat-aligned timbre, one beat per column
                     or None if something went wrong (e.g. no beats)
    """
    # if string, open and get timbre, if h5, get timbre
    if type(h5).__name__ == 'str':
        h5 = GETTERS.open_h5_file_read(h5)
        timbre = GETTERS.get_segments_timbre(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        h5.close()
    else:
        timbre = GETTERS.get_segments_timbre(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
    # get the series of starts for segments and beats
    # NOTE: MAYBE USELESS?
    # result for track: 'TR0002Q11C3FA8332D'
    #    segstarts.shape = (708,)
    #    btstarts.shape = (304,)
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # aligned features
    bttimbre = align_feats(timbre.T, segstarts, btstarts, duration)
    if bttimbre is None:
        return None
    # done (no renormalization)
    return bttimbre
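A quick usage sketch for the beat-aligned getters above; the filename is illustrative only:

# Hypothetical usage: one 12-dimensional timbre vector per beat.
bttimbre = get_bttimbre('TRAXLZU12903D05F94.h5')  # assumed example path
if bttimbre is not None:
    print(bttimbre.shape)  # (12, number_of_beats)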
def get_bttatums(h5):
    """
    Get beat-aligned timbre from a song file of the Million Song Dataset
    INPUT:
       h5          - filename or open h5 file
    RETURN:
       bttimbre    - beat-aligned timbre, one beat per column
                     or None if something went wrong (e.g. no beats)
    """
    # if string, open and get timbre, if h5, get timbre
    if type(h5).__name__ == 'str':
        h5 = GETTERS.open_h5_file_read(h5)
        tatums = GETTERS.get_tatums_start(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        h5.close()
    else:
        tatums = GETTERS.get_tatums_start(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
    tatums = np.array([tatums])
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # aligned features
    bttatums = align_feats(tatums, segstarts, btstarts, duration)
    if bttatums is None:
        return None
    # done (no renormalization)
    return bttatums
def msd_beatchroma(filename):
    """
    Get the same beatchroma as Dan
    Our filename is the full path
    TESTED
    """
    nchr = 12
    # get segments, pitches, beats, loudness
    h5 = GETTERS.open_h5_file_read(filename)
    pitches = GETTERS.get_segments_pitches(h5).T
    loudness = GETTERS.get_segments_loudness_start(h5)
    Tsegs = GETTERS.get_segments_start(h5)
    Tbeats = GETTERS.get_beats_start(h5)
    h5.close()
    # sanity checks
    if len(Tsegs) < 3 or len(Tbeats) < 2:
        return None
    # get chroma and apply per segments loudness
    Segs = pitches * np.tile(np.power(10., loudness / 20.), (nchr, 1))
    if Segs.shape[0] < 12 or Segs.shape[1] < 3:
        return None
    # properly figure time overlaps and weights
    C = resample_mx(Segs, Tsegs, Tbeats)
    # renormalize columns
    n = C.max(axis=0)
    return C * np.tile(1. / n, (nchr, 1))
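msd_beatchroma leans on resample_mx (not shown) to spread the loudness-weighted chroma over the beat grid. A hedged usage sketch, assuming a full path to an MSD file:

# Hypothetical usage: 12 chroma rows, one column per beat,
# each column rescaled so its maximum is 1.
C = msd_beatchroma('/path/to/TRAXLZU12903D05F94.h5')  # assumed path
if C is not None:
    print(C.shape[0], C.max(axis=0)[:5])  # 12, columns peak at 1.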
def h5_files_to_np_array(dir, filename):
    h5_files = get_h5_files(dir)
    num_done = 0
    seg_array = []
    # Go through every file and get the desired information.
    for file in h5_files:
        song = getters.open_h5_file_read(file)
        seg_append = np.array(getters.get_segments_pitches(song))
        seg_append = np.c_[seg_append,
                           np.array(getters.get_segments_timbre(song))]
        seg_append = np.c_[seg_append,
                           np.array(getters.get_segments_loudness_max(song))]
        seg_append = np.c_[seg_append,
                           np.array(getters.get_segments_loudness_start(song))]
        # Turn segment start times into segment durations.
        start = np.array(getters.get_segments_start(song))
        for i in range(0, len(start) - 1):
            start[i] = start[i + 1] - start[i]
        start[len(start) - 1] = getters.get_duration(song) - start[len(start) - 1]
        seg_append = np.c_[seg_append, start]
        # Add the arrays to the bottom of the list.
        seg_array.extend(seg_append.tolist())
        song.close()
        num_done = num_done + 1
        # Give a count for every 500 files completed.
        if num_done % 500 == 0:
            print("%d of %d" % (num_done, len(h5_files)))
    # Convert the list to a NumPy array.
    seg_array = np.array(seg_array)
    # Save the array to a file.
    seg_array.dump(filename)
    print("%d segments in the set." % len(seg_array))
    return seg_array
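h5_files_to_np_array relies on a get_h5_files helper that is not included in this listing. A minimal sketch of such a helper, assuming a simple os.walk over the dataset directory:

import os

def get_h5_files(dir):
    """Assumed helper: recursively collect every .h5 file under dir."""
    h5_files = []
    for root, dirs, files in os.walk(dir):
        for name in files:
            if name.endswith('.h5'):
                h5_files.append(os.path.join(root, name))
    return h5_files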
def plots(track):
    f, axarr = plt.subplots(2, sharex=True)
    path = "../../msd_dense_subset/dense/" + track[2] + "/" + track[3] + "/" + track[4] + "/" + track + ".h5"
    h5 = GETTERS.open_h5_file_read(path)
    segments = GETTERS.get_segments_start(h5)
    sections = GETTERS.get_sections_start(h5)
    max_loudness = GETTERS.get_segments_loudness_max(h5)
    loudness = GETTERS.get_segments_loudness_start(h5)
    average_loudness = (max_loudness + loudness) / 2
    average_loudness_song = GETTERS.get_loudness(h5)
    start_fade_out = GETTERS.get_start_of_fade_out(h5)
    end_fade_in = GETTERS.get_end_of_fade_in(h5)
    pitches = GETTERS.get_segments_pitches(h5)
    h5.close()

    axarr[0].set_title('loudness curve for ' + ut.get_track_info(track))
    axarr[0].plot(segments, average_loudness, label='Filtered')
    axarr[0].axhline(average_loudness_song, color='green')
    for section in sections:
        axarr[0].axvline(section, color='red')
    axarr[0].axvline(start_fade_out, color='green')
    axarr[0].axvline(end_fade_in, color='green')

    idx = list()
    for section in sections:
        dif = segments - section
        posdif = np.where(dif >= 0)
        idx.append(posdif[0][0])
    axarr[1].set_title('Chroma values for ' + ut.get_track_info(track))
    #axarr[1].set_xticks(idx, sections.astype(int))
    extent = [0, segments.shape[0], 0, 12]
    axarr[1].imshow(pitches.transpose(), extent=extent, aspect='auto',
                    interpolation='nearest', origin='lower')
    plt.show()
def get_btloudnessmax(h5):
    """
    Get beat-aligned loudness max from a song file of the Million Song Dataset
    INPUT:
       h5             - filename or open h5 file
    RETURN:
       btloudnessmax  - beat-aligned loudness max, one beat per column
                        or None if something went wrong (e.g. no beats)
    """
    # if string, open and get max loudness, if h5, get max loudness
    if type(h5).__name__ == 'str':
        h5 = GETTERS.open_h5_file_read(h5)
        loudnessmax = GETTERS.get_segments_loudness_max(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        h5.close()
    else:
        loudnessmax = GETTERS.get_segments_loudness_max(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
    # get the series of starts for segments and beats
    # NOTE: MAYBE USELESS?
    # result for track: 'TR0002Q11C3FA8332D'
    #    segstarts.shape = (708,)
    #    btstarts.shape = (304,)
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # reverse dB
    loudnessmax = idB(loudnessmax)
    # aligned features
    btloudnessmax = align_feats(loudnessmax.reshape(1,
                                                    loudnessmax.shape[0]),
                                segstarts, btstarts, duration)
    if btloudnessmax is None:
        return None
    # set it back to dB
    btloudnessmax = dB(btloudnessmax + 1e-10)
    # done (no renormalization)
    return btloudnessmax
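get_btloudnessmax converts loudness out of and back into decibels with idB and dB, which are not defined here. Consistent with the np.power(10., loudness / 20.) factor used in msd_beatchroma, they plausibly look like this (assumed, not the library's exact code):

def idB(loudness_db):
    """Assumed helper: decibels -> linear amplitude."""
    return np.power(10., loudness_db / 20.)


def dB(amplitude):
    """Assumed helper: linear amplitude -> decibels."""
    return 20. * np.log10(amplitude)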
def get_btchromas(h5):
    """
    Get beat-aligned chroma from a song file of the Million Song Dataset
    INPUT:
       h5          - filename or open h5 file
    RETURN:
       btchromas   - beat-aligned chromas, one beat per column
                     or None if something went wrong (e.g. no beats)
    """
    # if string, open and get chromas, if h5, get chromas
    if type(h5).__name__ == 'str':
        h5 = GETTERS.open_h5_file_read(h5)
        chromas = GETTERS.get_segments_pitches(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        h5.close()
    else:
        chromas = GETTERS.get_segments_pitches(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
    # get the series of starts for segments and beats
    # NOTE: MAYBE USELESS?
    # result for track: 'TR0002Q11C3FA8332D'
    #    segstarts.shape = (708,)
    #    btstarts.shape = (304,)
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # aligned features
    btchroma = align_feats(chromas.T, segstarts, btstarts, duration)
    if btchroma is None:
        return None
    # Renormalize. Each column max is 1.
    maxs = btchroma.max(axis=0)
    maxs[np.where(maxs == 0)] = 1.
    btchroma = (btchroma / maxs)
    # done
    return btchroma
def extract_segments_start(base_dir, MSD_track_ID):
    """Get segments start list by MSD track ID

    :param base_dir: base directory of data file
    :param MSD_track_ID: MSD track ID
    :return segments_start: segments start time array
    :rtype: ndarray
    """
    filename = base_dir + MSD_track_ID[2] + "/" + MSD_track_ID[3] + "/" \
        + MSD_track_ID[4] + "/" + MSD_track_ID + ".h5"
    h5 = hdf5_getters.open_h5_file_read(filename)
    segments_start = hdf5_getters.get_segments_start(h5)
    h5.close()

    return segments_start
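A short usage sketch; the base directory and track ID below are illustrative only:

# Hypothetical call: MSD files live under <base_dir>/<ID[2]>/<ID[3]>/<ID[4]>/<ID>.h5
starts = extract_segments_start("../data/", "TRAXLZU12903D05F94")
print(len(starts), "segments, first one at", starts[0], "seconds")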
def get_ten_feats_fullpath(feats_all, segments_all,  h5file):
    h5 = hdf5_getters.open_h5_file_read(h5file)
    timbre = hdf5_getters.get_segments_timbre(h5)
    loudness_start = hdf5_getters.get_segments_loudness_start(h5)
    loudness_max = hdf5_getters.get_segments_loudness_max(h5)
    loudness_max_time = hdf5_getters.get_segments_loudness_max_time(h5)
    C = hdf5_getters.get_segments_pitches(h5)        
    
    (hdf5path, filename) = os.path.split(h5file)
    target_audio_dir = os.path.split(hdf5path)[0]
    target_audio_path = os.path.join(target_audio_dir, os.path.splitext(filename)[0])
    segments_all.append(np.array([hdf5_getters.get_segments_start(h5), target_audio_path]))
    
    feats_all.append(np.hstack((timbre, loudness_start.reshape((loudness_start.shape[0], 1)),
                                loudness_max.reshape((loudness_max.shape[0], 1)),
                                loudness_max_time.reshape((loudness_max_time.shape[0], 1)),
                                C)))
    h5.close()
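get_ten_feats_fullpath appends into caller-owned lists, so it is typically driven by a loop like the following sketch (paths assumed):

# Hypothetical usage: accumulate per-file feature matrices into shared lists.
feats_all, segments_all = [], []
for path in ['song1.h5', 'song2.h5']:  # assumed example paths
    get_ten_feats_fullpath(feats_all, segments_all, path)
print(len(feats_all), "feature matrices collected")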
def extractFeatures(self):
    with hdf5_getters.open_h5_file_read(self.h5) as h5:
        self.tempo = hdf5_getters.get_tempo(h5, 0)
        ## Select the first 20 segments of the song
        self.segments_start = hdf5_getters.get_segments_start(h5)[0:20]
        ## Each segment has 12 timbre coeffs
        self.segments_timbre = hdf5_getters.get_segments_timbre(h5)[0:20].flatten()
        ## Each segment contains info on 12 pitch classes (C, C#, D to B)
        self.segments_pitches = hdf5_getters.get_segments_pitches(h5)[0:20].flatten()
        ## Segment intensity points
        self.segments_loudness_max = hdf5_getters.get_segments_loudness_max(h5)[0:20].flatten()
        ## Beats
        self.beats_start = hdf5_getters.get_beats_start(h5)
        self.beats_duration = np.diff(self.beats_start)[0:20]
        self.beats_average_period = np.mean(self.beats_duration)
def get_loudness(track,h5=None):
    #returns
    #0: the average loudness of the track
    #1: the variance of the loudness over the track
    #2: the difference between the highest and lowest dB value between the end of fade in and start of fade out
    close = (h5 is None)
    if h5 is None:
        path = "../../msd_dense_subset/dense/"+track[2]+"/"+track[3]+"/"+track[4]+"/"+track+".h5"
        h5 = GETTERS.open_h5_file_read(path)
    loudness_avg = GETTERS.get_loudness(h5)
    loudnesses_interval = GETTERS.get_segments_loudness_max(h5)
    start_segments = GETTERS.get_segments_start(h5)
    start_fade_out = GETTERS.get_start_of_fade_out(h5)
    end_fade_in = GETTERS.get_end_of_fade_in(h5)
    idx = np.where((start_segments > end_fade_in) & (start_segments < start_fade_out))
    if close:
        h5.close()
    #return (loudness_avg, np.var(loudnesses_interval[idx]), min(loudnesses_interval[idx]),  max(loudnesses_interval[idx]))
    return (loudness_avg, np.var(loudnesses_interval[idx]), abs(max(loudnesses_interval[idx])  - abs(min(loudnesses_interval[idx]))))
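A usage sketch for get_loudness; the track ID is illustrative:

# Hypothetical usage: average loudness, its variance, and the dynamic-range
# figure computed between end of fade-in and start of fade-out.
avg_db, var_db, range_db = get_loudness("TRAXLZU12903D05F94")
print(avg_db, var_db, range_db)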
def h5_to_csv_fields(h5,song):
	'''Converts h5 format to text
		Inputs: h5, an h5 file object, usable with the wrapper code MSongsDB
			song, an integer, representing which song in the h5 file to take the info out of (h5 files contain many songs)
		Output: a string representing all the information of this song, as a single line of a csv file
	'''
	rv=[]
	##All these are regular getter functions from wrapper code
	rv.append(gt.get_artist_name(h5,song))
	rv.append(gt.get_title(h5, song))
	rv.append(gt.get_release(h5, song))
	rv.append(gt.get_year(h5,song))
	rv.append(gt.get_duration(h5,song))
	rv.append(gt.get_artist_familiarity(h5,song))
	rv.append(gt.get_artist_hotttnesss(h5,song))
	rv.append(gt.get_song_hotttnesss(h5, song))
	
	##artist_terms, artist_terms_freq, and artist_terms_weight getter functions
	##are all arrays, so we need to turn them into strings first. We used '_' as a separator
	rv.append(array_to_csv_field(list(gt.get_artist_terms(h5,song))))
	rv.append(array_to_csv_field(list(gt.get_artist_terms_freq(h5,song))))
	rv.append(array_to_csv_field(list(gt.get_artist_terms_weight(h5,song))))
	rv.append(gt.get_mode(h5,song))
	rv.append(gt.get_key(h5,song))
	rv.append(gt.get_tempo(h5,song))
	rv.append(gt.get_loudness(h5,song))
	rv.append(gt.get_danceability(h5,song))
	rv.append(gt.get_energy(h5,song))
	rv.append(gt.get_time_signature(h5,song))
	rv.append(array_to_csv_field(list(gt.get_segments_start(h5,song))))
	##These arrays have vectors (Arrays) as items, 12 dimensional each
	##An array like [[1,2,3],[4,5,6]] will be written to csv as '1;2;3_4;5;6', i.e. there's two types of separators
	rv.append(double_Array_to_csv_field(list(gt.get_segments_timbre(h5,song)),'_',';'))
	rv.append(double_Array_to_csv_field(list(gt.get_segments_pitches(h5,song)),'_',';'))
	rv.append(array_to_csv_field(list(gt.get_segments_loudness_start(h5,song))))
	rv.append(array_to_csv_field(list(gt.get_segments_loudness_max(h5,song))))
	rv.append(array_to_csv_field(list(gt.get_segments_loudness_max_time(h5,song))))
	rv.append(array_to_csv_field(list(gt.get_sections_start(h5,song))))
	##turn this list into a string with comma separators (i.e. a csv line)
	rv_string=array_to_csv_field(rv, ",")
	rv_string+="\n"
	return rv_string
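h5_to_csv_fields leans on two small serialisation helpers that are not shown; minimal sketches, assuming the '_' and ';' separators described in the comments above:

def array_to_csv_field(values, separator='_'):
    """Assumed helper: join a flat list into one CSV cell using separator."""
    return separator.join(str(v) for v in values)


def double_Array_to_csv_field(values, outer_sep='_', inner_sep=';'):
    """Assumed helper: [[1, 2, 3], [4, 5, 6]] -> '1;2;3_4;5;6'."""
    return outer_sep.join(inner_sep.join(str(x) for x in row) for row in values)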
def plot_loudness_curve(track):
    path = "../../msd_dense_subset/mood/"+track[2]+"/"+track[3]+"/"+track[4]+"/"+track+".h5"
    h5 = GETTERS.open_h5_file_read(path)
    segments = (GETTERS.get_segments_start(h5))
    sections = (GETTERS.get_sections_start(h5))
    max_loudness = (GETTERS.get_segments_loudness_max(h5))
    loudness = (GETTERS.get_segments_loudness_start(h5))
    average_loudness = (max_loudness + loudness) / 2
    average_loudness_song = GETTERS.get_loudness(h5)
    start_fade_out = GETTERS.get_start_of_fade_out(h5)
    end_fade_in = GETTERS.get_end_of_fade_in(h5)
    plt.title('loudness curve for ' + ut.get_track_info(track))
    plt.ylabel('Loudness(dB)')
    plt.xlabel('Time(s)')
    plt.plot(segments, average_loudness, label='Filtered')
    plt.axhline(average_loudness_song, color='green', label='average')
    for section in sections:
        plt.axvline(section, color='red')
    plt.axvline(start_fade_out, color='green')
    plt.axvline(end_fade_in, color='green')
    #plt.legend(('average'))
    h5.close()
    plt.show()
def plot_chroma(track):   
    path = "../../msd_dense_subset/mood/"+track[2]+"/"+track[3]+"/"+track[4]+"/"+track+".h5"
    h5 = GETTERS.open_h5_file_read(path)
    pitches = GETTERS.get_segments_pitches(h5)
    segments = GETTERS.get_segments_start(h5)
    sections = GETTERS.get_sections_start(h5)
    idx = list()
    for section in sections:
        dif = segments - section
        posdif = np.where(dif >= 0)
        idx.append(posdif[0][0])
    plt.figure(num=None, figsize=(20, 12), dpi=80, facecolor='w', edgecolor='k')
    plt.title('Chroma values for ' + ut.get_track_info(track))
    plt.ylabel('Chroma values')
    plt.xlabel('Time')
    plt.xticks(idx, sections.astype(int))
    extent = [0, segments.shape[0], 0, 12]
    plt.imshow(pitches.transpose(), extent=extent, aspect='auto',
               interpolation='nearest', origin='lower')
    plt.colorbar()
    plt.autoscale(enable=True, axis='x')
    #plt.pcolor(pitches.transpose())
    h5.close()
    plt.show()
def fill_attributes(song, songH5File):

    #----------------------------non array attributes-------------------------------
    song.analysisSampleRate = str(
        hdf5_getters.get_analysis_sample_rate(songH5File))
    song.artistDigitalID = str(hdf5_getters.get_artist_7digitalid(songH5File))
    song.artistFamiliarity = str(
        hdf5_getters.get_artist_familiarity(songH5File))
    song.artistHotness = str(hdf5_getters.get_artist_hotttnesss(songH5File))
    song.artistID = str(hdf5_getters.get_artist_id(songH5File))
    song.artistLatitude = str(hdf5_getters.get_artist_latitude(songH5File))
    song.artistLocation = str(hdf5_getters.get_artist_location(songH5File))
    song.artistLongitude = str(hdf5_getters.get_artist_longitude(songH5File))
    song.artistmbID = str(hdf5_getters.get_artist_mbid(songH5File))
    song.artistName = str(hdf5_getters.get_artist_name(songH5File))
    song.artistPlayMeID = str(hdf5_getters.get_artist_playmeid(songH5File))
    song.audioMD5 = str(hdf5_getters.get_audio_md5(songH5File))
    song.danceability = str(hdf5_getters.get_danceability(songH5File))
    song.duration = str(hdf5_getters.get_duration(songH5File))
    song.endOfFadeIn = str(hdf5_getters.get_end_of_fade_in(songH5File))
    song.energy = str(hdf5_getters.get_energy(songH5File))
    song.key = str(hdf5_getters.get_key(songH5File))
    song.keyConfidence = str(hdf5_getters.get_key_confidence(songH5File))
    song.segmentsConfidence = str(
        hdf5_getters.get_segments_confidence(songH5File))
    song.sectionsConfidence = str(
        hdf5_getters.get_sections_confidence(songH5File))
    song.loudness = str(hdf5_getters.get_loudness(songH5File))
    song.mode = str(hdf5_getters.get_mode(songH5File))
    song.modeConfidence = str(hdf5_getters.get_mode_confidence(songH5File))
    song.release = str(hdf5_getters.get_release(songH5File))
    song.releaseDigitalID = str(
        hdf5_getters.get_release_7digitalid(songH5File))
    song.songHotttnesss = str(hdf5_getters.get_song_hotttnesss(songH5File))
    song.startOfFadeOut = str(hdf5_getters.get_start_of_fade_out(songH5File))
    song.tempo = str(hdf5_getters.get_tempo(songH5File))
    song.timeSignature = str(hdf5_getters.get_time_signature(songH5File))
    song.timeSignatureConfidence = str(
        hdf5_getters.get_time_signature_confidence(songH5File))
    song.title = str(hdf5_getters.get_title(songH5File))
    song.trackID = str(hdf5_getters.get_track_id(songH5File))
    song.trackDigitalID = str(hdf5_getters.get_track_7digitalid(songH5File))
    song.year = str(hdf5_getters.get_year(songH5File))

    #-------------------------------array attributes--------------------------------------
    #array float
    song.beatsStart_mean, song.beatsStart_var = convert_array_to_meanvar(
        hdf5_getters.get_beats_start(songH5File))
    #array float
    song.artistTermsFreq_mean, song.artistTermsFreq_var = convert_array_to_meanvar(
        hdf5_getters.get_artist_terms_freq(songH5File))
    #array float
    song.artistTermsWeight_mean, song.artistTermsWeight_var = convert_array_to_meanvar(
        hdf5_getters.get_artist_terms_weight(songH5File))
    #array int
    song.artistmbTagsCount_mean, song.artistmbTagsCount_var = convert_array_to_meanvar(
        hdf5_getters.get_artist_mbtags_count(songH5File))
    #array float
    song.barsConfidence_mean, song.barsConfidence_var = convert_array_to_meanvar(
        hdf5_getters.get_bars_confidence(songH5File))
    #array float
    song.barsStart_mean, song.barsStart_var = convert_array_to_meanvar(
        hdf5_getters.get_bars_start(songH5File))
    #array float
    song.beatsConfidence_mean, song.beatsConfidence_var = convert_array_to_meanvar(
        hdf5_getters.get_beats_confidence(songH5File))
    #array float
    song.sectionsConfidence_mean, song.sectionsConfidence_var = convert_array_to_meanvar(
        hdf5_getters.get_sections_confidence(songH5File))
    #array float
    song.sectionsStart_mean, song.sectionsStart_var = convert_array_to_meanvar(
        hdf5_getters.get_sections_start(songH5File))
    #array float
    song.segmentsConfidence_mean, song.segmentsConfidence_var = convert_array_to_meanvar(
        hdf5_getters.get_segments_confidence(songH5File))
    #array float
    song.segmentsLoudness_mean, song.segmentsLoudness_var = convert_array_to_meanvar(
        hdf5_getters.get_segments_loudness_max(songH5File))
    #array float
    song.segmentsLoudnessMaxTime_mean, song.segmentsLoudnessMaxTime_var = convert_array_to_meanvar(
        hdf5_getters.get_segments_loudness_max_time(songH5File))
    #array float
    song.segmentsLoudnessMaxStart_mean, song.segmentsLoudnessMaxStart_var = convert_array_to_meanvar(
        hdf5_getters.get_segments_loudness_start(songH5File))
    #array float
    song.segmentsStart_mean, song.segmentsStart_var = convert_array_to_meanvar(
        hdf5_getters.get_segments_start(songH5File))
    #array float
    song.tatumsConfidence_mean, song.tatumsConfidence_var = convert_array_to_meanvar(
        hdf5_getters.get_tatums_confidence(songH5File))
    #array float
    song.tatumsStart_mean, song.tatumsStart_var = convert_array_to_meanvar(
        hdf5_getters.get_tatums_start(songH5File))
    #array2d float
    song.segmentsTimbre_mean, song.segmentsTimbre_var = covert_2darray_to_meanvar(
        hdf5_getters.get_segments_timbre(songH5File))
    #array2d float
    song.segmentsPitches_mean, song.segmentsPitches_var = covert_2darray_to_meanvar(
        hdf5_getters.get_segments_pitches(songH5File))

    #------------------------array string attributes------------------------
    song.similarArtists = convert_array_to_string(
        hdf5_getters.get_similar_artists(songH5File))  #array string
    song.artistTerms = convert_array_to_string(
        hdf5_getters.get_artist_terms(songH5File))  #array string
    song.artistmbTags = convert_array_to_string(
        hdf5_getters.get_artist_mbtags(songH5File))  #array string

    return song
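fill_attributes summarises every array field through helpers that this listing does not define. Plausible sketches, keeping the names as used above (assumptions, including the 'covert' spelling):

def convert_array_to_meanvar(arr):
    """Assumed helper: reduce a 1-d array to (mean, variance), guarding empties."""
    arr = np.asarray(arr, dtype=float)
    if arr.size == 0:
        return 0.0, 0.0
    return float(np.mean(arr)), float(np.var(arr))


def covert_2darray_to_meanvar(arr2d):
    """Assumed helper (name as used above): flatten a 2-d array, then (mean, variance)."""
    return convert_array_to_meanvar(np.asarray(arr2d).flatten())


def convert_array_to_string(arr, separator=','):
    """Assumed helper: join an array of strings into one string."""
    return separator.join(str(x) for x in arr)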
def data_to_flat_file(basedir, ext='.h5'):
    """ This function extracts the information from the tables and creates the flat file. """
    count = 0
    #song counter
    list_to_write = []
    group_index = 0
    row_to_write = ""
    writer = csv.writer(open("complete.csv", "wb"))
    for root, dirs, files in os.walk(basedir):
        files = glob.glob(os.path.join(root, '*' + ext))
        for f in files:
            row = []
            print(f)
            h5 = hdf5_getters.open_h5_file_read(f)
            title = hdf5_getters.get_title(h5)
            title = title.replace('"', '')
            row.append(title)
            comma = title.find(',')
            if comma != -1:
                print(title)
                time.sleep(1)
            album = hdf5_getters.get_release(h5)
            album = album.replace('"', '')
            row.append(album)
            comma = album.find(',')
            if comma != -1:
                print(album)
                time.sleep(1)
            artist_name = hdf5_getters.get_artist_name(h5)
            comma = artist_name.find(',')
            if comma != -1:
                print(artist_name)
                time.sleep(1)
            artist_name = artist_name.replace('"', '')
            row.append(artist_name)
            duration = hdf5_getters.get_duration(h5)
            row.append(duration)
            samp_rt = hdf5_getters.get_analysis_sample_rate(h5)
            row.append(samp_rt)
            artist_7digitalid = hdf5_getters.get_artist_7digitalid(h5)
            row.append(artist_7digitalid)
            artist_fam = hdf5_getters.get_artist_familiarity(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_fam) == True:
                artist_fam = -1
            row.append(artist_fam)
            artist_hotness = hdf5_getters.get_artist_hotttnesss(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_hotness) == True:
                artist_hotness = -1
            row.append(artist_hotness)
            artist_id = hdf5_getters.get_artist_id(h5)
            row.append(artist_id)
            artist_lat = hdf5_getters.get_artist_latitude(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_lat) == True:
                artist_lat = -1
            row.append(artist_lat)
            artist_loc = hdf5_getters.get_artist_location(h5)
            row.append(artist_loc)
            artist_lon = hdf5_getters.get_artist_longitude(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_lon) == True:
                artist_lon = -1
            row.append(artist_lon)
            artist_mbid = hdf5_getters.get_artist_mbid(h5)
            row.append(artist_mbid)

            #Getting the genre
            art_trm = hdf5_getters.get_artist_terms(h5)
            trm_freq = hdf5_getters.get_artist_terms_freq(h5)
            trn_wght = hdf5_getters.get_artist_terms_weight(h5)
            a_mb_tags = hdf5_getters.get_artist_mbtags(h5)
            genre_indexes = get_genre_indexes(
                trm_freq)  #index of the highest freq
            genre_set = 0  #flag to see if the genre has been set or not
            final_genre = []
            genres_so_far = []
            for i in range(len(genre_indexes)):
                genre_tmp = get_genre(
                    art_trm, genre_indexes[i]
                )  #genre that corresponds to the highest freq
                genres_so_far = genre_dict.get_genre_in_dict(
                    genre_tmp)  #getting the genre from the dictionary
                if len(genres_so_far) != 0:
                    for genre in genres_so_far:
                        final_genre.append(genre)
                        genre_set = 1

            if genre_set == 1:
                col_num = []
                for i in final_genre:
                    column = int(i)  #getting the column number of the genre
                    col_num.append(column)

                genre_array = genre_columns(col_num)  #genre array
                for i in range(len(
                        genre_array)):  #appending the genre_array to the row
                    row.append(genre_array[i])
            else:
                genre_array = genre_columns(
                    -1
                )  #when there is no genre matched, return an array of [0...0]
                for i in range(len(
                        genre_array)):  #appending the genre_array to the row
                    row.append(genre_array[i])

            artist_pmid = hdf5_getters.get_artist_playmeid(h5)
            row.append(artist_pmid)
            audio_md5 = hdf5_getters.get_audio_md5(h5)
            row.append(audio_md5)
            danceability = hdf5_getters.get_danceability(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(danceability) == True:
                danceability = -1
            row.append(danceability)
            end_fade_in = hdf5_getters.get_end_of_fade_in(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(end_fade_in) == True:
                end_fade_in = -1
            row.append(end_fade_in)
            energy = hdf5_getters.get_energy(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(energy) == True:
                energy = -1
            row.append(energy)
            song_key = hdf5_getters.get_key(h5)
            row.append(song_key)
            key_c = hdf5_getters.get_key_confidence(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(key_c) == True:
                key_c = -1
            row.append(key_c)
            loudness = hdf5_getters.get_loudness(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(loudness) == True:
                loudness = -1
            row.append(loudness)
            mode = hdf5_getters.get_mode(h5)
            row.append(mode)
            mode_conf = hdf5_getters.get_mode_confidence(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(mode_conf) == True:
                mode_conf = -1
            row.append(mode_conf)
            release_7digitalid = hdf5_getters.get_release_7digitalid(h5)
            row.append(release_7digitalid)
            song_hot = hdf5_getters.get_song_hotttnesss(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(song_hot) == True:
                song_hot = -1
            row.append(song_hot)
            song_id = hdf5_getters.get_song_id(h5)
            row.append(song_id)
            start_fade_out = hdf5_getters.get_start_of_fade_out(h5)
            row.append(start_fade_out)
            tempo = hdf5_getters.get_tempo(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(tempo) == True:
                tempo = -1
            row.append(tempo)
            time_sig = hdf5_getters.get_time_signature(h5)
            row.append(time_sig)
            time_sig_c = hdf5_getters.get_time_signature_confidence(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(time_sig_c) == True:
                time_sig_c = -1
            row.append(time_sig_c)
            track_id = hdf5_getters.get_track_id(h5)
            row.append(track_id)
            track_7digitalid = hdf5_getters.get_track_7digitalid(h5)
            row.append(track_7digitalid)
            year = hdf5_getters.get_year(h5)
            row.append(year)
            bars_c = hdf5_getters.get_bars_confidence(h5)
            bars_start = hdf5_getters.get_bars_start(h5)
            row_bars_padding = padding(245)  # this is the array that will be attached at the end of the row

            #--------------bars---------------"
            gral_info = []
            gral_info = row[:]
            empty = []
            for i, item in enumerate(bars_c):
                row.append(group_index)
                row.append(i)
                row.append(bars_c[i])
                bars_c_avg = get_avg(bars_c)
                row.append(bars_c_avg)
                bars_c_max = get_max(bars_c)
                row.append(bars_c_max)
                bars_c_min = get_min(bars_c)
                row.append(bars_c_min)
                bars_c_stddev = get_stddev(bars_c)
                row.append(bars_c_stddev)
                bars_c_count = get_count(bars_c)
                row.append(bars_c_count)
                bars_c_sum = get_sum(bars_c)
                row.append(bars_c_sum)
                row.append(bars_start[i])
                bars_start_avg = get_avg(bars_start)
                row.append(bars_start_avg)
                bars_start_max = get_max(bars_start)
                row.append(bars_start_max)
                bars_start_min = get_min(bars_start)
                row.append(bars_start_min)
                bars_start_stddev = get_stddev(bars_start)
                row.append(bars_start_stddev)
                bars_start_count = get_count(bars_start)
                row.append(bars_start_count)
                bars_start_sum = get_sum(bars_start)
                row.append(bars_start_sum)
                for i in row_bars_padding:
                    row.append(i)

                writer.writerow(row)
                row = []
                row = gral_info[:]

    #--------beats---------------"
            beats_c = hdf5_getters.get_beats_confidence(h5)
            group_index = 1
            row = []
            row = gral_info[:]
            row_front = padding(
                14)  #blanks left in front of the row(empty spaces for bars)
            row_beats_padding = padding(231)
            for i, item in enumerate(beats_c):
                row.append(group_index)
                row.append(i)
                for index in row_front:  #padding blanks in front of the beats
                    row.append(index)

                row.append(beats_c[i])
                beats_c_avg = get_avg(beats_c)
                row.append(beats_c_avg)
                beats_c_max = get_max(beats_c)
                row.append(beats_c_max)
                beats_c_min = get_min(beats_c)
                row.append(beats_c_min)
                beats_c_stddev = get_stddev(beats_c)
                row.append(beats_c_stddev)
                beats_c_count = get_count(beats_c)
                row.append(beats_c_count)
                beats_c_sum = get_sum(beats_c)
                row.append(beats_c_sum)
                beats_start = hdf5_getters.get_beats_start(h5)
                row.append(beats_start[i])
                beats_start_avg = get_avg(beats_start)
                row.append(beats_start_avg)
                beats_start_max = get_max(beats_start)
                row.append(beats_start_max)
                beats_start_min = get_min(beats_start)
                row.append(beats_start_min)
                beats_start_stddev = get_stddev(beats_start)
                row.append(beats_start_stddev)
                beats_start_count = get_count(beats_start)
                row.append(beats_start_count)
                beats_start_sum = get_sum(beats_start)
                row.append(beats_start_sum)
                for i in row_beats_padding:
                    row.append(i)

                writer.writerow(row)
                row = []
                row = gral_info[:]

    # "--------sections---------------"
            row_sec_padding = padding(
                217)  #blank spaces left at the end of the row
            sec_c = hdf5_getters.get_sections_confidence(h5)
            group_index = 2
            row = []
            row = gral_info[:]
            row_front = padding(
                28)  #blank spaces left in front(empty spaces for bars,beats)
            for i, item in enumerate(sec_c):
                row.append(group_index)
                row.append(i)
                for index in row_front:  #padding blanks in front of the sections
                    row.append(index)

                row.append(sec_c[i])
                sec_c_avg = get_avg(sec_c)
                row.append(sec_c_avg)
                sec_c_max = get_max(sec_c)
                row.append(sec_c_max)
                sec_c_min = get_min(sec_c)
                row.append(sec_c_min)
                sec_c_stddev = get_stddev(sec_c)
                row.append(sec_c_stddev)
                sec_c_count = get_count(sec_c)
                row.append(sec_c_count)
                sec_c_sum = get_sum(sec_c)
                row.append(sec_c_sum)
                sec_start = hdf5_getters.get_sections_start(h5)
                row.append(sec_start[i])
                sec_start_avg = get_avg(sec_start)
                row.append(sec_start_avg)
                sec_start_max = get_max(sec_start)
                row.append(sec_start_max)
                sec_start_min = get_min(sec_start)
                row.append(sec_start_min)
                sec_start_stddev = get_stddev(sec_start)
                row.append(sec_start_stddev)
                sec_start_count = get_count(sec_start)
                row.append(sec_start_count)
                sec_start_sum = get_sum(sec_start)
                row.append(sec_start_sum)
                for i in row_sec_padding:  #appending the blank spaces at the end of the row
                    row.append(i)

                writer.writerow(row)
                row = []
                row = gral_info[:]

    #--------segments---------------"
            row_seg_padding = padding(182)  #blank spaces at the end of the row
            row_front = padding(42)  #blank spaces left in front of segments
            seg_c = hdf5_getters.get_segments_confidence(h5)
            group_index = 3
            row = []
            row = gral_info[:]
            for i, item in enumerate(seg_c):
                row.append(group_index)
                row.append(i)
                for index in row_front:  #padding blanks in front of the segments
                    row.append(index)

                row.append(seg_c[i])
                seg_c_avg = get_avg(seg_c)
                row.append(seg_c_avg)
                seg_c_max = get_max(seg_c)
                row.append(seg_c_max)
                seg_c_min = get_min(seg_c)
                row.append(seg_c_min)
                seg_c_stddev = get_stddev(seg_c)
                row.append(seg_c_stddev)
                seg_c_count = get_count(seg_c)
                row.append(seg_c_count)
                seg_c_sum = get_sum(seg_c)
                row.append(seg_c_sum)
                seg_loud_max = hdf5_getters.get_segments_loudness_max(h5)
                row.append(seg_loud_max[i])
                seg_loud_max_avg = get_avg(seg_loud_max)
                row.append(seg_loud_max_avg)
                seg_loud_max_max = get_max(seg_loud_max)
                row.append(seg_loud_max_max)
                seg_loud_max_min = get_min(seg_loud_max)
                row.append(seg_loud_max_min)
                seg_loud_max_stddev = get_stddev(seg_loud_max)
                row.append(seg_loud_max_stddev)
                seg_loud_max_count = get_count(seg_loud_max)
                row.append(seg_loud_max_count)
                seg_loud_max_sum = get_sum(seg_loud_max)
                row.append(seg_loud_max_sum)
                seg_loud_max_time = hdf5_getters.get_segments_loudness_max_time(
                    h5)
                row.append(seg_loud_max_time[i])
                seg_loud_max_time_avg = get_avg(seg_loud_max_time)
                row.append(seg_loud_max_time_avg)
                seg_loud_max_time_max = get_max(seg_loud_max_time)
                row.append(seg_loud_max_time_max)
                seg_loud_max_time_min = get_min(seg_loud_max_time)
                row.append(seg_loud_max_time_min)
                seg_loud_max_time_stddev = get_stddev(seg_loud_max_time)
                row.append(seg_loud_max_time_stddev)
                seg_loud_max_time_count = get_count(seg_loud_max_time)
                row.append(seg_loud_max_time_count)
                seg_loud_max_time_sum = get_sum(seg_loud_max_time)
                row.append(seg_loud_max_time_sum)
                seg_loud_start = hdf5_getters.get_segments_loudness_start(h5)
                row.append(seg_loud_start[i])
                seg_loud_start_avg = get_avg(seg_loud_start)
                row.append(seg_loud_start_avg)
                seg_loud_start_max = get_max(seg_loud_start)
                row.append(seg_loud_start_max)
                seg_loud_start_min = get_min(seg_loud_start)
                row.append(seg_loud_start_min)
                seg_loud_start_stddev = get_stddev(seg_loud_start)
                row.append(seg_loud_start_stddev)
                seg_loud_start_count = get_count(seg_loud_start)
                row.append(seg_loud_start_count)
                seg_loud_start_sum = get_sum(seg_loud_start)
                row.append(seg_loud_start_sum)
                seg_start = hdf5_getters.get_segments_start(h5)
                row.append(seg_start[i])
                seg_start_avg = get_avg(seg_start)
                row.append(seg_start_avg)
                seg_start_max = get_max(seg_start)
                row.append(seg_start_max)
                seg_start_min = get_min(seg_start)
                row.append(seg_start_min)
                seg_start_stddev = get_stddev(seg_start)
                row.append(seg_start_stddev)
                seg_start_count = get_count(seg_start)
                row.append(seg_start_count)
                seg_start_sum = get_sum(seg_start)
                row.append(seg_start_sum)
                for i in row_seg_padding:  #appending blank spaces at the end of the row
                    row.append(i)

                writer.writerow(row)
                row = []
                row = gral_info[:]

            #----------segments pitch and timbre---------------"
            row_seg2_padding = padding(
                14)  #blank spaces left at the end of the row
            row_front = padding(
                77)  #blank spaces left at the front of the segments and timbre
            seg_pitch = hdf5_getters.get_segments_pitches(h5)
            transpose_pitch = seg_pitch.transpose()  # this is to transpose the matrix, so we can have 12 rows
            group_index = 4
            row = []
            row = gral_info[:]
            for i, item in enumerate(transpose_pitch[0]):
                row.append(group_index)
                row.append(i)
                for index in row_front:  #padding blanks in front of segments and timbre
                    row.append(index)

                row.append(transpose_pitch[0][i])
                seg_pitch_avg = get_avg(transpose_pitch[0])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[0])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[0])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[0])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[0])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[0])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[1][i])
                seg_pitch_avg = get_avg(transpose_pitch[1])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[1])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[1])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[1])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[1])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[1])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[2][i])
                seg_pitch_avg = get_avg(transpose_pitch[2])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[2])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[2])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[2])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[2])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[2])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[3][i])
                seg_pitch_avg = get_avg(transpose_pitch[3])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[3])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[3])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[3])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[3])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[3])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[4][i])
                seg_pitch_avg = get_avg(transpose_pitch[4])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[4])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[4])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[4])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[4])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[4])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[5][i])
                seg_pitch_avg = get_avg(transpose_pitch[5])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[5])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[5])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[5])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[5])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[5])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[6][i])
                seg_pitch_avg = get_avg(transpose_pitch[6])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[6])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[6])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[6])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[6])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[6])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[7][i])
                seg_pitch_avg = get_avg(transpose_pitch[7])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[7])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[7])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[7])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[7])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[7])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[8][i])
                seg_pitch_avg = get_avg(transpose_pitch[8])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[8])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[8])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[8])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[8])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[8])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[9][i])
                seg_pitch_avg = get_avg(transpose_pitch[9])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[9])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[9])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[9])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[9])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[9])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[10][i])
                seg_pitch_avg = get_avg(transpose_pitch[10])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[10])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[10])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[10])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[10])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[10])
                row.append(seg_pitch_sum)
                row.append(transpose_pitch[11][i])
                seg_pitch_avg = get_avg(transpose_pitch[11])
                row.append(seg_pitch_avg)
                seg_pitch_max = get_max(transpose_pitch[11])
                row.append(seg_pitch_max)
                seg_pitch_min = get_min(transpose_pitch[11])
                row.append(seg_pitch_min)
                seg_pitch_stddev = get_stddev(transpose_pitch[11])
                row.append(seg_pitch_stddev)
                seg_pitch_count = get_count(transpose_pitch[11])
                row.append(seg_pitch_count)
                seg_pitch_sum = get_sum(transpose_pitch[11])
                row.append(seg_pitch_sum)
                #timbre arrays
                seg_timbre = hdf5_getters.get_segments_timbre(h5)
                transpose_timbre = seg_timbre.transpose()  #transposing matrix, to have 12 rows
                row.append(transpose_timbre[0][i])
                seg_timbre_avg = get_avg(transpose_timbre[0])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[0])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[0])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[0])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[0])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[0])
                row.append(seg_timbre_sum)
                row.append(transpose_timbre[1][i])
                seg_timbre_avg = get_avg(transpose_timbre[1])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[1])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[1])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[1])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[1])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[1])
                row.append(seg_timbre_sum)
                row.append(transpose_timbre[2][i])
                seg_timbre_avg = get_avg(transpose_timbre[2])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[2])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[2])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[2])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[2])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[2])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[3][i])
                seg_timbre_avg = get_avg(transpose_timbre[3])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[3])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[3])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[3])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[3])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[3])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[4][i])
                seg_timbre_avg = get_avg(transpose_timbre[4])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[4])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[4])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[4])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[4])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[4])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[5][i])
                seg_timbre_avg = get_avg(transpose_timbre[5])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[5])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[5])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[5])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[5])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[5])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[6][i])
                seg_timbre_avg = get_avg(transpose_timbre[6])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[6])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[6])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[6])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[6])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[6])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[7][i])
                seg_timbre_avg = get_avg(transpose_timbre[7])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[7])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[7])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[7])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[7])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[7])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[8][i])
                seg_timbre_avg = get_avg(transpose_timbre[8])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[8])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[8])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[8])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[8])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[8])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[9][i])
                seg_timbre_avg = get_avg(transpose_timbre[9])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[9])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[9])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[9])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[9])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[9])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[10][i])
                seg_timbre_avg = get_avg(transpose_timbre[10])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[10])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[10])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[10])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[10])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[10])
                row.append(seg_timbre_sum)

                row.append(transpose_timbre[11][i])
                seg_timbre_avg = get_avg(transpose_timbre[11])
                row.append(seg_timbre_avg)
                seg_timbre_max = get_max(transpose_timbre[11])
                row.append(seg_timbre_max)
                seg_timbre_min = get_min(transpose_timbre[11])
                row.append(seg_timbre_min)
                seg_timbre_stddev = get_stddev(transpose_timbre[11])
                row.append(seg_timbre_stddev)
                seg_timbre_count = get_count(transpose_timbre[11])
                row.append(seg_timbre_count)
                seg_timbre_sum = get_sum(transpose_timbre[11])
                row.append(seg_timbre_sum)
                for item in row_seg2_padding:
                    row.append(item)
                writer.writerow(row)
                row = []
                row = gral_info[:]

    # "--------tatums---------------"
            tatms_c = hdf5_getters.get_tatums_confidence(h5)
            group_index = 5
            row_front = padding(245)  #blank spaces left in front of tatums
            row = []
            row = gral_info[:]
            for i, item in enumerate(tatms_c):
                row.append(group_index)
                row.append(i)
                for item in row_front:  #appending blank spaces at the front of the row
                    row.append(item)

                row.append(tatms_c[i])
                tatms_c_avg = get_avg(tatms_c)
                row.append(tatms_c_avg)
                tatms_c_max = get_max(tatms_c)
                row.append(tatms_c_max)
                tatms_c_min = get_min(tatms_c)
                row.append(tatms_c_min)
                tatms_c_stddev = get_stddev(tatms_c)
                row.append(tatms_c_stddev)
                tatms_c_count = get_count(tatms_c)
                row.append(tatms_c_count)
                tatms_c_sum = get_sum(tatms_c)
                row.append(tatms_c_sum)
                tatms_start = hdf5_getters.get_tatums_start(h5)
                row.append(tatms_start[i])
                tatms_start_avg = get_avg(tatms_start)
                row.append(tatms_start_avg)
                tatms_start_max = get_max(tatms_start)
                row.append(tatms_start_max)
                tatms_start_min = get_min(tatms_start)
                row.append(tatms_start_min)
                tatms_start_stddev = get_stddev(tatms_start)
                row.append(tatms_start_stddev)
                tatms_start_count = get_count(tatms_start)
                row.append(tatms_start_count)
                tatms_start_sum = get_sum(tatms_start)
                row.append(tatms_start_sum)
                writer.writerow(row)
                row = []
                row = gral_info[:]

            transpose_pitch = seg_pitch.transpose()  #this is to transpose the matrix, so we can have 12 rows
            #arrays containing the aggregate values of the 12 rows
            seg_pitch_avg = []
            seg_pitch_max = []
            seg_pitch_min = []
            seg_pitch_stddev = []
            seg_pitch_count = []
            seg_pitch_sum = []
            i = 0
            #Getting the aggregate values in the pitches array
            for row in transpose_pitch:
                seg_pitch_avg.append(get_avg(row))
                seg_pitch_max.append(get_max(row))
                seg_pitch_min.append(get_min(row))
                seg_pitch_stddev.append(get_stddev(row))
                seg_pitch_count.append(get_count(row))
                seg_pitch_sum.append(get_sum(row))
                i = i + 1

            #extracting information from the timbre array
            transpose_timbre = seg_timbre.transpose()  #transposing matrix, to have 12 rows
            #arrays containing the aggregate values of the 12 rows
            seg_timbre_avg = []
            seg_timbre_max = []
            seg_timbre_min = []
            seg_timbre_stddev = []
            seg_timbre_count = []
            seg_timbre_sum = []
            i = 0
            for row in transpose_timbre:
                seg_timbre_avg.append(get_avg(row))
                seg_timbre_max.append(get_max(row))
                seg_timbre_min.append(get_min(row))
                seg_timbre_stddev.append(get_stddev(row))
                seg_timbre_count.append(get_count(row))
                seg_timbre_sum.append(get_sum(row))
                i = i + 1

            h5.close()
            count = count + 1
            print count
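
The long unrolled pitch and timbre blocks above append, for each of the 12 rows, the raw value at segment i followed by six aggregates (avg, max, min, stddev, count, sum). A minimal loop-based sketch of the same per-row feature pattern, assuming the get_* helpers reduce to the usual numpy aggregates (an assumption; the originals are defined elsewhere in this script):

import numpy as np

def per_row_segment_features(transposed, i):
    """Hedged sketch: the seven per-row features built by the unrolled block above."""
    feats = []
    for r in range(12):
        vals = transposed[r]
        feats.append(vals[i])  # raw value for segment i
        feats.extend([np.mean(vals), np.max(vals), np.min(vals),
                      np.std(vals), len(vals), np.sum(vals)])
    return feats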
Exemple #24
0
max_duration = 20  # in seconds
original.crop(0, max_duration * original.fs)

wsize = 1024
tstep = 512

# Get the magnitude spectrum for the given audio file
learn_specs = features.get_stft(original.data, wsize, tstep)
learn_specs = learn_specs.T
# Read the features in the h5 file
h5 = hdf5_getters.open_h5_file_read(h5file)
timbre = hdf5_getters.get_segments_timbre(h5)
loudness_start = hdf5_getters.get_segments_loudness_start(h5)
C = hdf5_getters.get_segments_pitches(h5)
segments_all = hdf5_getters.get_segments_start(h5)

learn_feats_all = np.hstack(
    (timbre, loudness_start.reshape((loudness_start.shape[0], 1)), C))

max_seg_idx = np.where(segments_all < max_duration)[0][-1]
segments = segments_all[:max_seg_idx]

# now we have a 30202 x 512 matrix of magnitude spectra and a 1147 x 1 vector of time positions for
# them.
# How can we combine the spectra in order to resynthesize the audio?

ref_time = np.arange(0.,
                     float(original.length) / float(original.fs),
                     float(tstep) / float(original.fs))
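
# A hedged sketch (not in the original) of one way to answer the question
# above: map each segment start onto the STFT frame time axis and average
# the magnitude spectra that fall inside each segment (this assumes the rows
# of learn_specs are time frames aligned with ref_time).
frame_idx = np.searchsorted(ref_time, segments)
seg_specs = np.array([learn_specs[a:b].mean(axis=0)
                      for a, b in zip(frame_idx[:-1], frame_idx[1:]) if b > a])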
                mode_confidence = hdf5_getters.get_mode_confidence(hdf)
                start_of_fade_out = hdf5_getters.get_start_of_fade_out(hdf)
                tempo = hdf5_getters.get_tempo(hdf)
                time_signature = hdf5_getters.get_time_signature(hdf)
                time_signature_confidence = hdf5_getters.get_time_signature_confidence(
                    hdf)
                track_id = hdf5_getters.get_track_id(hdf)
                year = hdf5_getters.get_year(hdf)

                similar_artists = hdf5_getters.get_similar_artists(hdf)

                artist_terms = hdf5_getters.get_artist_terms(hdf)
                artist_terms_freq = hdf5_getters.get_artist_terms_freq(hdf)
                artist_terms_weight = hdf5_getters.get_artist_terms_weight(hdf)

                segments_start = hdf5_getters.get_segments_start(hdf)

                segments_confidence = hdf5_getters.get_segments_confidence(hdf)
                segments_pitches = hdf5_getters.get_segments_pitches(hdf)
                segments_timbre = hdf5_getters.get_segments_timbre(hdf)

                segments_loudness_max = hdf5_getters.get_segments_loudness_max(
                    hdf)
                segments_loudness_max_time = hdf5_getters.get_segments_loudness_max_time(
                    hdf)
                segments_loudness_start = hdf5_getters.get_segments_loudness_start(
                    hdf)

                sections_start = hdf5_getters.get_sections_start(hdf)
                sections_confidence = hdf5_getters.get_sections_confidence(hdf)
def get_all_data(dir, ext, outDir):
    for root, dirs, files in os.walk(dir):
        files = glob.glob(os.path.join(root, '*' + ext))
        for f in files:
            h5 = get.open_h5_file_read(f)
            keys = {}

            keys['trackId'] = get.get_track_id(h5)
            #keys['analysisSampleRate'] = get.get_analysis_sample_rate(h5)

            #keys['artistMbTags'] = arrToStr(get.get_artist_mbtags(h5))
            #keys['artistMbId'] = get.get_artist_mbid(h5)
            #keys['artistName'] = get.get_artist_name(h5)
            #keys['artistPlayMeId'] = get.get_artist_playmeid(h5)

            #keys['artistTermsWeightArr'] = arrToStr(get.get_artist_terms_weight(h5)) #array
            #keys['artistTermsFreqArr'] = arrToStr(get.get_artist_terms_freq(h5))  #array
            #keys['artistTermsArr'] = arrToStr(get.get_artist_terms(h5)) #array

            #keys['audioMd5'] = get.get_audio_md5(h5)

            #keys['barsConfidence'] = arrToStr(get.get_bars_confidence(h5)) #
            #keys['barsStart'] = arrToStr(get.get_bars_start(h5)) #

            #keys['beatsConfidence'] = arrToStr(get.get_beats_confidence(h5)) #
            #keys['beatsStart'] = arrToStr(get.get_beats_start(h5)) #

            #keys['danceability'] = get.get_danceability(h5)
            #keys['duration'] = get.get_duration(h5)
            #keys['endofFadeIn'] = get.get_end_of_fade_in(h5)
            #keys['energy'] = get.get_energy(h5)
            #keys['key'] = get.get_key(h5)
            keys['loudness'] = get.get_loudness(h5)
            #keys['keyConfidence'] = get.get_key_confidence(h5)
            #keys['mode'] = get.get_mode(h5)
            #keys['release'] = get.get_release(h5)
            #keys['release7DigitalId'] = get.get_release_7digitalid(h5)
            #keys['modeConfidence'] = get.get_mode_confidence(h5)

            #keys['sectionsConfidenceArr'] = arrToStr(get.get_sections_confidence(h5)) #
            #keys['sectionsStartArr'] = arrToStr(get.get_sections_start(h5))

            #Segments data begin
            keys['segmentsStartArr'] = arrToStr(get.get_segments_start(h5)) #
            #keys['segmentsConfidenceArr'] = arrToStr(get.get_segments_confidence(h5)) #
            #keys['segmentsLoudnessMaxArr'] = arrToStr(get.get_segments_loudness_max(h5)) #
            #keys['segmentsLoudnessMaxTimeArr'] = arrToStr(get.get_segments_loudness_max_time(h5)) #
            #keys['segmentsLoudnessStartArr'] = arrToStr(get.get_segments_loudness_start(h5)) #
            keys['segmentsPitchesArr'] = arrToStr((np.array(get.get_segments_pitches(h5))).flatten()) #
            keys['segmentsTimbreArr'] = arrToStr((np.array(get.get_segments_timbre(h5))).flatten()) #Timbre data
            #Segments data End
            #keys['similarArtists'] = arrToStr(get.get_similar_artists(h5))
            #keys['songHotness'] = get.get_song_hotttnesss(h5)
            keys['songId'] = get.get_song_id(h5)
            #keys['startOfFadeOut'] = get.get_start_of_fade_out(h5)
            #keys['tatumsConfidence'] = arrToStr(get.get_tatums_confidence(h5)) #
            #keys['tatumsStart'] = arrToStr(get.get_tatums_start(h5)) #
            #keys['tempo'] = get.get_tempo(h5)
            #keys['timeSignature'] = get.get_time_signature(h5)
            #keys['timeSignatureConfidence'] = get.get_time_signature_confidence(h5)
            keys['title'] = get.get_title(h5)
            #keys['track7DigitalId'] = get.get_track_7digitalid(h5)
            #keys['year'] = get.get_year(h5)

            with open(outDir, 'a') as f:
                for key in ordering:
                    f.write('{0}\t'.format(keys[key]))
                f.write('\n')
            """
            for key in keys:
                print key
                print "--->  "
                print keys[key]
            """
            h5.close()
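
A hedged usage sketch for the walker above; the data directory and output path are placeholders, not from the original:

get_all_data('/path/to/MillionSongSubset/data', '.h5', '/path/to/msd_features.tsv')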
Exemple #27
0
def convert_to_csv():
    i = 0

    data = []
    target = []
    count = 0

    with open('data_timbre.csv', 'w+') as f:
        with open('target_timbre.csv', 'w+') as f2:
            writer = csv.writer(f)
            target_writer = csv.writer(f2)

            for root, dirs, files in os.walk(msd_subset_data_path):
                files = glob.glob(os.path.join(root, '*.h5'))
                for f in sorted(files):
                    try:  # Opening is very prone to causing exceptions, we'll just skip file if exception is thrown
                        h5 = getter.open_h5_file_read(f)
                        year = getter.get_year(h5)
                        if year:
                            count += 1

                            analysis_file = open('current_analysis_status.txt',
                                                 'a')
                            update = "Currently at file name: " + str(
                                f) + " and at number " + str(count) + "\n"
                            analysis_file.write(update)
                            print update
                            analysis_file.close()

                            target.append([year])
                            row = []

                            timbre = getter.get_segments_timbre(h5)
                            segstarts = getter.get_segments_start(h5)
                            btstarts = getter.get_beats_start(h5)
                            duration = getter.get_duration(h5)
                            end_of_fade_in = getter.get_end_of_fade_in(h5)
                            key = getter.get_key(h5)
                            key_confidence = getter.get_key_confidence(h5)
                            loudness = getter.get_loudness(h5)
                            start_of_fade_out = getter.get_start_of_fade_out(
                                h5)
                            tempo = getter.get_tempo(h5)
                            time_signature = getter.get_time_signature(h5)
                            time_signature_confidence = getter.get_time_signature_confidence(
                                h5)

                            h5.close()  # VERY IMPORTANT

                            segstarts = np.array(segstarts).flatten()
                            btstarts = np.array(btstarts).flatten()

                            bttimbre = align_feats(
                                timbre.T, segstarts, btstarts, duration,
                                end_of_fade_in, key, key_confidence, loudness,
                                start_of_fade_out, tempo, time_signature,
                                time_signature_confidence)

                            if bttimbre is None:
                                continue  # Skip this track, some features broken

                            npicks, winsize, finaldim = 12, 12, 144  # Calculated by 12 * 12. 12 is fixed as number of dimensions.
                            processed_feats = extract_and_compress(
                                bttimbre, npicks, winsize, finaldim)
                            if processed_feats is None:
                                continue  # Skip this track, some features broken
                            n_p_feats = processed_feats.shape[0]

                            row = processed_feats.flatten()
                            if len(row) != 12 * 144:  # 12 dimensions * 144 features per dimension
                                continue  # Not enough features

                            year_row = np.array([year])

                            if row.any() and year_row.any():
                                writer.writerow(row)
                                target_writer.writerow(year_row)

                            i += 1

                        else:
                            h5.close()

                    except Exception:
                        pass

    print 'Finished!'

    analysis_file = open('current_analysis_status.txt', 'a')
    analysis_file.write('Done!')
    analysis_file.close()

    return
Exemple #28
0
wintime = 0.016
steptime = 0.004
sr = 32000

# Load a random file info
subset_path = '/home/manu/workspace/databases/MillionSongSubset/data/'
import hdf5_getters
#h5 = hdf5_getters.open_h5_file_read(subset_path+'A/V/A/TRAVAAN128F9359AAE.h5')
output_dir = '/sons/rwc/Learn/hdf5/'
output = output_dir + 'rwc-g-m01_1.h5'
h5 = hdf5_getters.open_h5_file_read(output)

duration = hdf5_getters.get_duration(h5)
title = hdf5_getters.get_title(h5)
n_segments_start = hdf5_getters.get_segments_start(h5)
n_segment = n_segments_start.shape[0]
artist_name = hdf5_getters.get_artist_name(h5)
timbre = hdf5_getters.get_segments_timbre(h5)
loudness = hdf5_getters.get_segments_loudness_start(h5)
C = hdf5_getters.get_segments_pitches(h5)
beattimes = hdf5_getters.get_segments_start(h5)

digital_id = hdf5_getters.get_track_7digitalid(h5)

# guess the duration of a segment
seg_dur = n_segments_start[1:] - n_segments_start[0:-1]

# Load the learning parts
from scipy.io import loadmat
full_path = '/home/manu/workspace/audio-sketch/matlab/'
def get_segments_start(self):
    if self.h5 is None: self.open()
    return hdf5_getters.get_segments_start(self.h5)
def classify(h5):
    output_array = {}
    # duration
    duration = hdf5_getters.get_duration(h5)
    output_array["duration"] = duration  ### ADDED VALUE TO ARRAY
    # number of bars
    bars = hdf5_getters.get_bars_start(h5)
    num_bars = len(bars)
    output_array["num_bars"] = num_bars  ### ADDED VALUE TO ARRAY
    # mean and variance in bar length
    bar_length = numpy.ediff1d(bars)
    variance_bar_length = numpy.var(bar_length)
    output_array[
        "variance_bar_length"] = variance_bar_length  ### ADDED VALUE TO ARRAY
    # number of beats
    beats = hdf5_getters.get_beats_start(h5)
    num_beats = len(beats)
    output_array["num_beats"] = num_beats  ### ADDED VALUE TO ARRAY
    # mean and variance in beats length
    beats_length = numpy.ediff1d(beats)
    variance_beats_length = numpy.var(beats_length)
    output_array[
        "variance_beats_length"] = variance_beats_length  ### ADDED VALUE TO ARRAY
    # danceability
    danceability = hdf5_getters.get_danceability(h5)
    output_array["danceability"] = danceability  ### ADDED VALUE TO ARRAY
    # end of fade in
    end_of_fade_in = hdf5_getters.get_end_of_fade_in(h5)
    output_array["end_of_fade_in"] = end_of_fade_in  ### ADDED VALUE TO ARRAY
    # energy
    energy = hdf5_getters.get_energy(h5)
    output_array["energy"] = energy  ### ADDED VALUE TO ARRAY
    # key
    key = hdf5_getters.get_key(h5)
    output_array["key"] = int(key)  ### ADDED VALUE TO ARRAY
    # loudness
    loudness = hdf5_getters.get_loudness(h5)
    output_array["loudness"] = loudness  ### ADDED VALUE TO ARRAY
    # mode
    mode = hdf5_getters.get_mode(h5)
    output_array["mode"] = int(mode)  ### ADDED VALUE TO ARRAY
    # number sections
    sections = hdf5_getters.get_sections_start(h5)
    num_sections = len(sections)
    output_array["num_sections"] = num_sections  ### ADDED VALUE TO ARRAY
    # mean and variance in sections length
    sections_length = numpy.ediff1d(sections)
    variance_sections_length = numpy.var(sections_length)
    output_array[
        "variance_sections_length"] = variance_sections_length  ### ADDED VALUE TO ARRAY
    # number segments
    segments = hdf5_getters.get_segments_start(h5)
    num_segments = len(segments)
    output_array["num_segments"] = num_segments  ### ADDED VALUE TO ARRAY
    # mean and variance in segments length
    segments_length = numpy.ediff1d(segments)
    variance_segments_length = numpy.var(segments_length)
    output_array[
        "variance_segments_length"] = variance_segments_length  ### ADDED VALUE TO ARRAY
    # segment loudness max
    segment_loudness_max_array = hdf5_getters.get_segments_loudness_max(h5)
    segment_loudness_max_time_array = hdf5_getters.get_segments_loudness_max_time(
        h5)
    segment_loudness_max_index = 0
    for i in range(len(segment_loudness_max_array)):
        if segment_loudness_max_array[i] > segment_loudness_max_array[
                segment_loudness_max_index]:
            segment_loudness_max_index = i
    segment_loudness_max = segment_loudness_max_array[
        segment_loudness_max_index]
    segment_loudness_max_time = segment_loudness_max_time_array[
        segment_loudness_max_index]
    output_array[
        "segment_loudness_max"] = segment_loudness_max  ### ADDED VALUE TO ARRAY
    output_array[
        "segment_loudness_time"] = segment_loudness_max_time  ### ADDED VALUE TO ARRAY

    # POSSIBLE TODO: use average function instead and weight by segment length
    # segment loudness mean (start)
    segment_loudness_array = hdf5_getters.get_segments_loudness_start(h5)
    segment_loudness_mean = numpy.mean(segment_loudness_array)
    output_array[
        "segment_loudness_mean"] = segment_loudness_mean  ### ADDED VALUE TO ARRAY
    # segment loudness variance (start)
    segment_loudness_variance = numpy.var(segment_loudness_array)
    output_array[
        "segment_loudness_variance"] = segment_loudness_variance  ### ADDED VALUE TO ARRAY
    # segment pitches
    segment_pitches_array = hdf5_getters.get_segments_pitches(h5)
    segment_pitches_mean = numpy.mean(segment_pitches_array, axis=0).tolist()
    output_array["segment_pitches_mean"] = segment_pitches_mean
    # segment pitches variance (start)
    segment_pitches_variance = numpy.var(segment_pitches_array,
                                         axis=0).tolist()
    output_array["segment_pitches_variance"] = segment_pitches_variance
    # segment timbres
    segment_timbres_array = hdf5_getters.get_segments_timbre(h5)
    segment_timbres_mean = numpy.mean(segment_timbres_array, axis=0).tolist()
    output_array["segment_timbres_mean"] = segment_timbres_mean
    # segment timbres variance (start)
    segment_timbres_variance = numpy.var(segment_timbres_array,
                                         axis=0).tolist()
    output_array["segment_timbres_variance"] = segment_timbres_variance
    # hotttnesss
    hottness = hdf5_getters.get_song_hotttnesss(h5, 0)
    output_array["hottness"] = hottness  ### ADDED VALUE TO ARRAY
    # duration-start of fade out
    start_of_fade_out = hdf5_getters.get_start_of_fade_out(h5)
    fade_out = duration - start_of_fade_out
    output_array["fade_out"] = fade_out  ### ADDED VALUE TO ARRAY
    # tatums
    tatums = hdf5_getters.get_tatums_start(h5)
    num_tatums = len(tatums)
    output_array["num_tatums"] = num_tatums  ### ADDED VALUE TO ARRAY
    # mean and variance in tatums length
    tatums_length = numpy.ediff1d(tatums)
    variance_tatums_length = numpy.var(tatums_length)
    output_array[
        "variance_tatums_length"] = variance_tatums_length  ### ADDED VALUE TO ARRAY
    # tempo
    tempo = hdf5_getters.get_tempo(h5)
    output_array["tempo"] = tempo  ### ADDED VALUE TO ARRAY
    # time signature
    time_signature = hdf5_getters.get_time_signature(h5)
    output_array["time_signature"] = int(
        time_signature)  ### ADDED VALUE TO ARRAY
    # year
    year = hdf5_getters.get_year(h5)
    output_array["year"] = int(year)  ### ADDED VALUE TO ARRAY
    # artist terms
    artist_terms = hdf5_getters.get_artist_terms(h5, 0)
    output_array["artist_terms"] = artist_terms.tolist()
    artist_terms_freq = hdf5_getters.get_artist_terms_freq(h5, 0)
    output_array["artist_terms_freq"] = artist_terms_freq.tolist()
    artist_name = hdf5_getters.get_artist_name(h5, 0)
    output_array["artist_name"] = artist_name
    artist_id = hdf5_getters.get_artist_id(h5, 0)
    output_array["artist_id"] = artist_id
    # title
    title = hdf5_getters.get_title(h5, 0)
    output_array["title"] = title

    return output_array
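
A hedged usage sketch for classify(); the track path is a placeholder:

h5 = hdf5_getters.open_h5_file_read('/path/to/some_track.h5')
song_features = classify(h5)
h5.close()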
Exemple #31
0
def get_all_files(basedir, ext='.h5'):
    """
    From a root directory, go through all subdirectories
    and read every file with the given extension.
    Build a data.csv of per-song features (matched against Billboard.csv),
    or load it if it already exists, and return the result as a DataFrame.
    """
    c=0
    title=[]#get_title(h5)
    name=[]#get_artist_name()
    familiarity=[]#get_artist_familiarity()
    artist_hotness=[]#get_artist_hotttnesss
    song_hotness=[]# get_song_hotttnesss
    danceability=[]#get_danceability
    energy=[]#get_energy
    loudness=[]#get_loudness
    tempo=[]#get_tempo
    mode_confidence=[]#get_mode_confidence()
    time_sig_confidence=[]#get_time_signature_confidence()
    no_segments=[]#len(get_segments_start())
    avg_segment_confidence=[]#np.mean(hdf5_getters.get_segments_confidence(h5))
    avg_segment_pitches=[]#np.mean(hdf5_getters.get_segments_pitches(h51))
    no_sections=[]#len(hdf5_getters.get_sections_start())
    avg_sections_confidence=[]#np.mean(hdf5_getters.get_sections_confidence(h51))
    no_beats_start=[]#len(hdf5_getters.get_beats_start(h51))
    avg_beats_confidence=[]#np.mean(hdf5_getters.get_beats_confidence(h51))
    no_bars=[]#len(hdf5_getters.get_bars_start(h5))
    avg_bar_confidence=[]#np.mean(hdf5_getters.get_bars_confidence((h51))
    no_tatums_start=[]#len(hdf5_getters.get_tatums_start(h5))
    avg_tatums_start=[]#np.mean(get_tatums_confidence())
    billboard_presence=[]#returned value from web scraper
    key=[]
    duration=[]
    mode=[]
    target=pd.read_csv('Billboard.csv')
    j=0
    if(not(os.path.isfile('./data.csv'))):
        for root, dirs, files in os.walk(basedir):
            files = glob.glob(os.path.join(root,'*'+ext))
            for f in files :
                h5 = hdf5_getters.open_h5_file_read(f)
                songnme=hdf5_getters.get_title(h5)
                artst=hdf5_getters.get_artist_name(h5)
                for i in range(len(target)):
                    if(target.Title[i]==songnme and target.Name[i]==artst):
                        billboard_presence.append(target.Presence[i])
                        
                        title.append(songnme)
                        name.append(artst)
                        familiarity.append(hdf5_getters.get_artist_familiarity(h5))
                        artist_hotness.append(hdf5_getters.get_artist_hotttnesss(h5))
                        song_hotness.append(hdf5_getters. get_song_hotttnesss(h5))
                        danceability.append(hdf5_getters.get_danceability(h5))
                        key.append(hdf5_getters.get_key(h5))
                        duration.append(hdf5_getters.get_duration(h5))
                        mode.append(hdf5_getters.get_mode(h5))
                        energy.append(hdf5_getters.get_energy(h5))
                        loudness.append(hdf5_getters.get_loudness(h5))
                        tempo.append(hdf5_getters.get_tempo(h5))
                        mode_confidence.append(hdf5_getters.get_mode_confidence(h5))
                        time_sig_confidence.append(hdf5_getters.get_time_signature_confidence(h5))
                        
                        no_segments.append(len(hdf5_getters.get_segments_start(h5)))
                        avg_segment_confidence.append(np.mean(hdf5_getters.get_segments_confidence(h5)))
                        avg_segment_pitches.append(np.mean(hdf5_getters.get_segments_pitches(h5)))
                        no_sections.append(len(hdf5_getters.get_sections_start(h5)))
                        avg_sections_confidence.append(np.mean(hdf5_getters.get_sections_confidence(h5)))
                        no_beats_start.append(len(hdf5_getters.get_beats_start(h5)))
                        avg_beats_confidence.append(np.mean(hdf5_getters.get_beats_confidence(h5)))
                        no_bars.append(len(hdf5_getters.get_bars_start(h5)))
                        avg_bar_confidence.append(np.mean(hdf5_getters.get_bars_confidence(h5)))
                        no_tatums_start.append(len(hdf5_getters.get_tatums_start(h5)))
                        avg_tatums_start.append(np.mean(hdf5_getters.get_tatums_confidence(h5)))
                        
                        j+=1
                        print j #prints the index number of each song, to keep track of the song being saved to the database, and to identify errors.
                        break
                        
                h5.close()
        print "Created Arrays"             
        df=pd.DataFrame(title,columns=['Title'])
        df['Artist_Name']=name
        df['Familiarity']=familiarity
        df['Hotness']=artist_hotness
        df['Song_hotness']=song_hotness
        df['Danceability']=danceability
        df['energy']=energy
        df['loudness']=loudness
        df['tempo']=tempo
        df['mode_confidence']=mode_confidence
        df['time_sig_confidence']=time_sig_confidence
        df['no_segments']=no_segments
        df['avg_segment_confidence']=avg_segment_confidence
        df['avg_segment_pitches']=avg_segment_pitches
        df['no_sections']=no_sections
        df['avg_sections_confidence']=avg_sections_confidence
        df['no_beats_start']=no_beats_start
        df['avg_beats_confidence']=avg_beats_confidence
        df['no_bars']=no_bars
        df['avg_bar_confidence']=avg_bar_confidence
        df['no_tatums_start']=no_tatums_start
        df['avg_tatums_start']=avg_tatums_start
        df['key']=key
        df['Mode']=mode
        df['duration']=duration
        df['Presence']=billboard_presence
        print df.head()
        print billboard_presence
        df.to_csv("data.csv")
    else:
        df=pd.read_csv('data.csv',index_col=0)
        print "Number of features in the created dataset:",
        print len(df.keys())
        print
    return df
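
A hedged usage sketch for get_all_files(); the base directory is a placeholder:

df = get_all_files('/path/to/MillionSongSubset/data')
print df.shape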
Exemple #32
0
def main(argv):
    if len(argv) != 1:
        print "Specify data directory"
        return
    basedir = argv[0]
    outputFile1 = open('SongCSV.csv', 'w')
    outputFile2 = open('TagsCSV.csv', 'w')
    csvRowString = ""
    csvLabelString = ""
    #################################################
    #if you want to prompt the user for the order of attributes in the csv,
    #leave the prompt boolean set to True
    #else, set 'prompt' to False and set the order of attributes in the 'else'
    #clause
    prompt = False
    #################################################
    if prompt == True:
        while prompt:

            prompt = False

            csvAttributeString = raw_input("\n\nIn what order would you like the columns of the CSV file?\n" +
                "Please delineate with commas. The options are: " +
                "AlbumName, AlbumID, ArtistID, ArtistLatitude, ArtistLocation, ArtistLongitude,"+
                " ArtistName, Danceability, Duration, KeySignature, KeySignatureConfidence, Tempo," +
                " SongID, TimeSignature, TimeSignatureConfidence, Title, and Year.\n\n" +
                "For example, you may write \"Title, Tempo, Duration\"...\n\n" +
                "...or exit by typing 'exit'.\n\n")

            csvAttributeList = re.split('\W+', csvAttributeString)
            for i, v in enumerate(csvAttributeList):
                csvAttributeList[i] = csvAttributeList[i].lower()

            for attribute in csvAttributeList:
                # print "Here is the attribute: " + attribute + " \n"


                if attribute == 'AlbumID'.lower():
                    csvRowString += 'AlbumID'
                elif attribute == 'AlbumName'.lower():
                    csvRowString += 'AlbumName'
                elif attribute == 'ArtistID'.lower():
                    csvRowString += 'ArtistID'
                elif attribute == 'ArtistLatitude'.lower():
                    csvRowString += 'ArtistLatitude'
                elif attribute == 'ArtistLocation'.lower():
                    csvRowString += 'ArtistLocation'
                elif attribute == 'ArtistLongitude'.lower():
                    csvRowString += 'ArtistLongitude'
                elif attribute == 'ArtistName'.lower():
                    csvRowString += 'ArtistName'
                elif attribute == 'Danceability'.lower():
                    csvRowString += 'Danceability'
                elif attribute == 'Duration'.lower():
                    csvRowString += 'Duration'
                elif attribute == 'KeySignature'.lower():
                    csvRowString += 'KeySignature'
                elif attribute == 'KeySignatureConfidence'.lower():
                    csvRowString += 'KeySignatureConfidence'
                elif attribute == 'SongID'.lower():
                    csvRowString += "SongID"
                elif attribute == 'Tempo'.lower():
                    csvRowString += 'Tempo'
                elif attribute == 'TimeSignature'.lower():
                    csvRowString += 'TimeSignature'
                elif attribute == 'TimeSignatureConfidence'.lower():
                    csvRowString += 'TimeSignatureConfidence'
                elif attribute == 'Title'.lower():
                    csvRowString += 'Title'
                elif attribute == 'Year'.lower():
                    csvRowString += 'Year'
                elif attribute == 'Exit'.lower():
                    sys.exit()
                else:
                    prompt = True
                    print "=============="
                    print "I believe there has been an error with the input."
                    print "=============="
                    break

                csvRowString += ","

            lastIndex = len(csvRowString)
            csvRowString = csvRowString[0:lastIndex-1]
            csvRowString += "\n"
            outputFile1.write(csvRowString);
            csvRowString = ""
    #else, if you want to hard code the order of the csv file and not prompt
    #the user,
    else:
        #################################################
        #change the order of the csv file here
        #Default is to list all available attributes (in alphabetical order)
        #csvRowString = ("SongID,AlbumID,AlbumName,ArtistID,ArtistLatitude,ArtistLocation,"+
        #    "ArtistLongitude,ArtistName,Danceability,Duration,KeySignature,"+
        #    "KeySignatureConfidence,Tempo,TimeSignature,TimeSignatureConfidence,"+
        #    "Title,Year")
        csvRowString = ("ArtistFamiliarity,ArtistHotttnesss,"+
            "BarsConfidence,BarsStart,BeatsConfidence,BeatsStart,Duration,"+
            "EndOfFadeIn,Key,KeyConfidence,Loudness,Mode,ModeConfidence,"+
            "SectionsConfidence,SectionsStart,SegmentsConfidence,SegmentsLoudnessMax,"+
            "SegmentsLoudnessMaxTime,SegmentsLoudnessStart,SegmentsStart,"+
            "SongHotttnesss,StartOfFadeOut,TatumsConfidence,TatumsStart,Tempo,TimeSignature,TimeSignatureConfidence,"+
            "SegmentsPitches,SegmentsTimbre,Title,Year,Decade,ArtistMbtags")
        #################################################
        header = str()
        csvAttributeList = re.split('\W+', csvRowString)
        arrayAttributes = ["BarsConfidence","BarsStart","BeatsConfidence","BeatsStart",
                           "SectionsConfidence","SectionsStart","SegmentsConfidence","SegmentsLoudnessMax",
                           "SegmentsLoudnessMaxTime","SegmentsLoudnessStart","SegmentsStart",
                           "TatumsConfidence","TatumsStart"]
        for i, v in enumerate(csvAttributeList):
            csvAttributeList[i] = csvAttributeList[i].lower()
            if(v=="SegmentsPitches"):
                for i in range(90):
                    header = header + "SegmentsPitches" + str(i) + ","
            elif(v=="SegmentsTimbre"):
                for i in range(90):
                    header = header + "SegmentsTimbre" + str(i) + ","
            elif(v in arrayAttributes):
                header = header + v + str(0) + ","
                header = header + v + str(1) + ","
            else:
                header = header + v + ","
        outputFile1.write("SongNumber,");
        #outputFile1.write(csvRowString + "\n");
        outputFile1.write(header + "\n");
        csvRowString = ""

    #################################################


    #Set the basedir here, the root directory from which the search
    #for files stored in a (hierarchical data structure) will originate
    #basedir = "MillionSongSubset/data/A/A/" # "." As the default means the current directory
    ext = ".h5" #Set the extension here. H5 is the extension for HDF5 files.
    #################################################

    #FOR LOOP
    all = sorted(os.walk(basedir))
    for root, dirs, files in all:
        files = sorted(glob.glob(os.path.join(root,'*'+ext)))
        for f in files:
            print f

            songH5File = hdf5_getters.open_h5_file_read(f)
            song = Song(str(hdf5_getters.get_song_id(songH5File)))

            #testDanceability = hdf5_getters.get_danceability(songH5File)
            # print type(testDanceability)
            # print ("Here is the danceability: ") + str(testDanceability)

            song.analysisSampleRate = str(hdf5_getters.get_analysis_sample_rate(songH5File))
            song.artistFamiliarity = str(hdf5_getters.get_artist_familiarity(songH5File))
            song.artistHotttnesss = str(hdf5_getters.get_artist_hotttnesss(songH5File))
            song.artistLatitude = str(hdf5_getters.get_artist_latitude(songH5File))
            song.artistLongitude = str(hdf5_getters.get_artist_longitude(songH5File))
            song.artistMbid = str(hdf5_getters.get_artist_mbid(songH5File))
            song.barsConfidence = np.array(hdf5_getters.get_bars_confidence(songH5File))
            song.barsStart = np.array(hdf5_getters.get_bars_start(songH5File))
            song.beatsConfidence = np.array(hdf5_getters.get_beats_confidence(songH5File))
            song.beatsStart = np.array(hdf5_getters.get_beats_start(songH5File))
            song.danceability = str(hdf5_getters.get_danceability(songH5File))
            song.duration = str(hdf5_getters.get_duration(songH5File))
            song.endOfFadeIn = str(hdf5_getters.get_end_of_fade_in(songH5File))
            song.energy = str(hdf5_getters.get_energy(songH5File))
            song.key = str(hdf5_getters.get_key(songH5File))
            song.keyConfidence = str(hdf5_getters.get_key_confidence(songH5File))
            song.loudness = str(hdf5_getters.get_loudness(songH5File))
            song.mode = str(hdf5_getters.get_mode(songH5File))
            song.modeConfidence = str(hdf5_getters.get_mode_confidence(songH5File))
            song.sectionsConfidence = np.array(hdf5_getters.get_sections_confidence(songH5File))
            song.sectionsStart = np.array(hdf5_getters.get_sections_start(songH5File))
            song.segmentsConfidence = np.array(hdf5_getters.get_segments_confidence(songH5File))
            song.segmentsLoudnessMax = np.array(hdf5_getters.get_segments_loudness_max(songH5File))
            song.segmentsLoudnessMaxTime = np.array(hdf5_getters.get_segments_loudness_max_time(songH5File))
            song.segmentsLoudnessStart = np.array(hdf5_getters.get_segments_loudness_start(songH5File))
            song.segmentsPitches = np.array(hdf5_getters.get_segments_pitches(songH5File))
            song.segmentsStart = np.array(hdf5_getters.get_segments_start(songH5File))
            song.segmentsTimbre = np.array(hdf5_getters.get_segments_timbre(songH5File))
            song.songHotttnesss = str(hdf5_getters.get_song_hotttnesss(songH5File))
            song.startOfFadeOut = str(hdf5_getters.get_start_of_fade_out(songH5File))
            song.tatumsConfidence = np.array(hdf5_getters.get_tatums_confidence(songH5File))
            song.tatumsStart = np.array(hdf5_getters.get_tatums_start(songH5File))
            song.tempo = str(hdf5_getters.get_tempo(songH5File))
            song.timeSignature = str(hdf5_getters.get_time_signature(songH5File))
            song.timeSignatureConfidence = str(hdf5_getters.get_time_signature_confidence(songH5File))
            song.songid = str(hdf5_getters.get_song_id(songH5File))
            song.title = str(hdf5_getters.get_title(songH5File))
            song.year = str(hdf5_getters.get_year(songH5File))
            song.artistMbtags = str(hdf5_getters.get_artist_mbtags(songH5File))
            #print song count
            csvRowString += str(song.songCount) + ","
            csvLabelString += str(song.songCount) + ","
            
            for attribute in csvAttributeList:
                # print "Here is the attribute: " + attribute + " \n"

                if attribute == 'AnalysisSampleRate'.lower():
                    csvRowString += song.analysisSampleRate
                elif attribute == 'ArtistFamiliarity'.lower():
                    csvRowString += song.artistFamiliarity
                elif attribute == 'ArtistHotttnesss'.lower():
                    csvRowString += song.artistHotttnesss
                elif attribute == 'ArtistLatitude'.lower():
                    latitude = song.artistLatitude
                    if latitude == 'nan':
                        latitude = ''
                    csvRowString += latitude
                elif attribute == 'ArtistLongitude'.lower():
                    longitude = song.artistLongitude
                    if longitude == 'nan':
                        longitude = ''
                    csvRowString += longitude
                elif attribute == 'ArtistMbid'.lower():
                    csvRowString += song.artistMbid
                elif attribute == 'BarsConfidence'.lower():
                    arr = song.barsConfidence
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'BarsStart'.lower():
                    arr = song.barsStart
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'BeatsConfidence'.lower():
                    arr = song.beatsConfidence
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'BeatsStart'.lower():
                    arr = song.beatsStart
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'Danceability'.lower():
                    csvRowString += song.danceability
                elif attribute == 'Duration'.lower():
                    csvRowString += song.duration
                elif attribute == 'EndOfFadeIn'.lower():
                    csvRowString += song.endOfFadeIn
                elif attribute == 'Energy'.lower():
                    csvRowString += song.energy
                elif attribute == 'Key'.lower():
                    csvRowString += song.key
                elif attribute == 'KeyConfidence'.lower():
                    csvRowString += song.keyConfidence
                elif attribute == 'Loudness'.lower():
                    csvRowString += song.loudness
                elif attribute == 'Mode'.lower():
                    csvRowString += song.mode
                elif attribute == 'ModeConfidence'.lower():
                    csvRowString += song.modeConfidence
                elif attribute == 'SectionsConfidence'.lower():
                    arr = song.sectionsConfidence
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SectionsStart'.lower():
                    arr = song.sectionsStart
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SegmentsConfidence'.lower():
                    arr = song.segmentsConfidence
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SegmentsLoudnessMax'.lower():
                    arr = song.segmentsLoudnessMax
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SegmentsLoudnessMaxTime'.lower():
                    arr = song.segmentsLoudnessMaxTime
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SegmentsLoudnessStart'.lower():
                    arr = song.segmentsLoudnessStart
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SegmentsStart'.lower():
                    arr = song.segmentsStart
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'SongHotttnesss'.lower():
                    hotttnesss = song.songHotttnesss
                    if hotttnesss == 'nan':
                        hotttnesss = 'NaN'
                    csvRowString += hotttnesss
                elif attribute == 'StartOfFadeOut'.lower():
                    csvRowString += song.startOfFadeOut
                elif attribute == 'TatumsConfidence'.lower():
                    arr = song.tatumsConfidence
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'TatumsStart'.lower():
                    arr = song.tatumsStart
                    if arr.shape[0] == 0:
                        arrmean = ''
                        arrnorm = ''
                    else:
                        arrmean = np.mean(arr)
                        arrnorm = np.linalg.norm(arr)
                    csvRowString += str(arrmean) + ',' + str(arrnorm)
                elif attribute == 'Tempo'.lower():
                    # print "Tempo: " + song.tempo
                    csvRowString += song.tempo
                elif attribute == 'TimeSignature'.lower():
                    csvRowString += song.timeSignature
                elif attribute == 'TimeSignatureConfidence'.lower():
                    # print "time sig conf: " + song.timeSignatureConfidence
                    csvRowString += song.timeSignatureConfidence
                elif attribute == 'SegmentsPitches'.lower():
                    colmean = np.mean(song.segmentsPitches,axis=0)
                    for m in colmean:
                        csvRowString += str(m) + ","
                    cov = np.dot(song.segmentsPitches.T,song.segmentsPitches)
                    utriind = np.triu_indices(cov.shape[0])
                    feats = cov[utriind]
                    for feat in feats:
                        csvRowString += str(feat) + ","
                    lastIndex = len(csvRowString)
                    csvRowString = csvRowString[0:lastIndex-1]
                elif attribute == 'SegmentsTimbre'.lower():
                    colmean = np.mean(song.segmentsTimbre,axis=0)
                    for m in colmean:
                        csvRowString += str(m) + ","
                    cov = np.dot(song.segmentsTimbre.T,song.segmentsTimbre)
                    utriind = np.triu_indices(cov.shape[0])
                    feats = cov[utriind]
                    for feat in feats:
                        csvRowString += str(feat) + ","
                    lastIndex = len(csvRowString)
                    csvRowString = csvRowString[0:lastIndex-1]
                elif attribute == 'SongID'.lower():
                    csvRowString += "\"" + song.id + "\""
                elif attribute == 'Title'.lower():
                    csvRowString += "\"" + song.title + "\""
                elif attribute == 'Year'.lower():
                    csvRowString += song.year
                elif attribute == 'Decade'.lower():
                    yr = song.year
                    if int(yr) > 0:
                        decade = yr[:-1] + '0'
                    else:
                        decade = '0'
                    csvRowString += decade
                elif attribute == 'ArtistMbtags'.lower():
                    tags = song.artistMbtags[1:-1]
                    tags = "\"" + tags + "\""
                    tags = tags.replace("\n",'')
                    csvRowString += tags
                    tagsarray = shlex.split(tags)
                    for t in tagsarray:
                        csvLabelString += t + ","
                else:
                     csvRowString += "Erm. This didn't work. Error. :( :(\n"

                csvRowString += ","

                '''
                if attribute == 'AlbumID'.lower():
                    csvRowString += song.albumID
                elif attribute == 'AlbumName'.lower():
                    albumName = song.albumName
                    albumName = albumName.replace(',',"")
                    csvRowString += "\"" + albumName + "\""
                elif attribute == 'ArtistID'.lower():
                    csvRowString += "\"" + song.artistID + "\""
                elif attribute == 'ArtistLatitude'.lower():
                    latitude = song.artistLatitude
                    if latitude == 'nan':
                        latitude = ''
                    csvRowString += latitude
                elif attribute == 'ArtistLocation'.lower():
                    location = song.artistLocation
                    location = location.replace(',','')
                    csvRowString += "\"" + location + "\""
                elif attribute == 'ArtistLongitude'.lower():
                    longitude = song.artistLongitude
                    if longitude == 'nan':
                        longitude = ''
                    csvRowString += longitude
                elif attribute == 'ArtistName'.lower():
                    csvRowString += "\"" + song.artistName + "\""
                elif attribute == 'Danceability'.lower():
                    csvRowString += song.danceability
                elif attribute == 'Duration'.lower():
                    csvRowString += song.duration
                elif attribute == 'KeySignature'.lower():
                    csvRowString += song.keySignature
                elif attribute == 'KeySignatureConfidence'.lower():
                    # print "key sig conf: " + song.timeSignatureConfidence
                    csvRowString += song.keySignatureConfidence
                elif attribute == 'SongID'.lower():
                    csvRowString += "\"" + song.id + "\""
                elif attribute == 'Tempo'.lower():
                    # print "Tempo: " + song.tempo
                    csvRowString += song.tempo
                elif attribute == 'TimeSignature'.lower():
                    csvRowString += song.timeSignature
                elif attribute == 'TimeSignatureConfidence'.lower():
                    # print "time sig conf: " + song.timeSignatureConfidence
                    csvRowString += song.timeSignatureConfidence
                elif attribute == 'Title'.lower():
                    csvRowString += "\"" + song.title + "\""
                elif attribute == 'Year'.lower():
                    csvRowString += song.year
                else:
                    csvRowString += "Erm. This didn't work. Error. :( :(\n"

                csvRowString += ","
                '''
            #Remove the final comma from each row in the csv
            lastIndex = len(csvRowString)
            csvRowString = csvRowString[0:lastIndex-1]
            csvRowString += "\n"
            outputFile1.write(csvRowString)
            csvRowString = ""
            
            lastIndex = len(csvLabelString)
            csvLabelString = csvLabelString[0:lastIndex-1]
            csvLabelString += "\n"
            outputFile2.write(csvLabelString)
            csvLabelString = ""

            songH5File.close()

    outputFile1.close()
    outputFile2.close()
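
# The array-valued branches above all repeat the same aggregation: arithmetic
# mean plus L2 norm, with empty strings when the array is empty. A helper along
# the following lines would collapse that repetition; it is only a sketch
# (assuming numpy is imported as np, as elsewhere in this script), not part of
# the original code.
def mean_and_norm(arr):
    """Return (mean, L2 norm) of a 1-D array as CSV-ready strings."""
    if arr.shape[0] == 0:
        return '', ''
    return str(np.mean(arr)), str(np.linalg.norm(arr))

# e.g. inside the attribute loop:
#     arrmean, arrnorm = mean_and_norm(song.tatumsStart)
#     csvRowString += arrmean + ',' + arrnorm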
Exemple #33
0
def expe_1_synth_from_same_sample():        
    input_dir = '/sons/rwc/Learn/'
    output_dir = '/sons/rwc/Learn/hdf5/'
    
    audiofile = input_dir + 'rwc-g-m01_1.wav'
    h5file = output_dir + 'rwc-g-m01_1.h5'
    
    # load the Echo Nest features
    h5 = hdf5_getters.open_h5_file_read(h5file)
    timbre = hdf5_getters.get_segments_timbre(h5)
    loudness_start = hdf5_getters.get_segments_loudness_start(h5)
    loudness_max = hdf5_getters.get_segments_loudness_max(h5)
    loudness_max_time = hdf5_getters.get_segments_loudness_max_time(h5)
    C = hdf5_getters.get_segments_pitches(h5)
    segments_all = hdf5_getters.get_segments_start(h5)
    
    learn_feats_all = np.hstack((timbre,
                             loudness_start.reshape((loudness_start.shape[0],1)),
                            C))
    
    # OK, that was the best possible case; now let us try to find the nearest neighbors,
    # get the segments back and resynthesize!
    
    
    learn_duration = 200 # in seconds
    test_start = 200
    test_duration = 5
    
    # Get learning data
    learning = Signal(audiofile, mono=True)
    learning.crop(0, learn_duration*learning.fs)
    
    wsize = 1024
    tstep = 512
    # Get the magnitude spectrum for the given audio file
    learn_specs = features.get_stft(learning.data, wsize, tstep)
    learn_specs = learn_specs.T
    
    max_l_seg_idx = np.where(segments_all < learn_duration)[0][-1]
    l_segments = segments_all[:max_l_seg_idx]
    l_segment_lengths = (l_segments[1:] - l_segments[0:-1])*learning.fs
    
    
    learn_feats = learn_feats_all[:max_l_seg_idx,:]
    # we must keep in mind for each segment index, the corresponding indices in the learn_spec mat
    l_seg_bounds = []
    ref_time = np.arange(0., float(learning.length)/float(learning.fs), float(tstep)/float(learning.fs))
    for segI in range(len(l_segments)-1):
        startIdx = np.where(ref_time > l_segments[segI])[0][0]
        endIdx = np.where(ref_time > l_segments[segI+1])[0][0]
        l_seg_bounds.append((startIdx,endIdx))
    l_seg_bounds.append((endIdx, ref_time.shape[0]))
    
    # Get testing data
    testing = Signal(audiofile, mono=True)
    testing.crop(test_start*testing.fs, (test_start+test_duration)*testing.fs)
    
    # get the testing features
    min_t_seg_idx =  np.where(segments_all < test_start)[0][-1]
    max_t_seg_idx =  np.where(segments_all < test_start + test_duration)[0][-1]
    t_segments = segments_all[min_t_seg_idx:max_t_seg_idx]
    t_segment_lengths = (t_segments[1:] - t_segments[0:-1])*testing.fs
    test_feats = learn_feats_all[min_t_seg_idx:max_t_seg_idx,:]
    
    # find the nearest neighbors
    from sklearn.neighbors import NearestNeighbors
    neigh = NearestNeighbors(n_neighbors=1)
    # fit on the learning data
    neigh.fit(learn_feats)
    neighb_segments_idx = neigh.kneighbors(test_feats, return_distance=False)
    
    # kneighs is a set of segment indices, we need to get the spectrogram back from the learning data
    # then fit the new segment lengths
    
    target_length = int(test_duration*testing.fs)
    
    neighb_segments = zip(neighb_segments_idx[:,0], t_segment_lengths.astype(int))



    morphed_spectro = spec_morph(np.abs(learn_specs), target_length, neighb_segments, l_seg_bounds)
    
    
    # retrieve true stft for comparison
    test_specs = features.get_stft(testing.data, wsize, tstep)
    
    plt.figure()
    plt.subplot(121)
    plt.imshow(np.log(np.abs(test_specs)), origin='lower')
    plt.colorbar()
    plt.subplot(122)
    plt.imshow(np.log(morphed_spectro.T), origin='lower')
    plt.colorbar()
    plt.show()
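    # Turn the morphed magnitude spectrogram back into a waveform below:
    # gl_recons presumably performs an iterative (Griffin-Lim style) phase
    # estimation, here for 10 iterations with the same window/hop sizes as the
    # analysis STFT, starting from a random initial signal.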
    
    
    init_vec = np.random.randn(morphed_spectro.shape[0]*tstep)
    rec_method2 = transforms.gl_recons(morphed_spectro.T, init_vec, 10, wsize, tstep, display=False)
    rec_sig_2 = Signal(rec_method2, testing.fs, mono=True, normalize=True)
    rec_sig_2.write('/sons/tests/rec_sig2.wav')
def classify(h5):
	output_array={}
	# duration
	duration=hdf5_getters.get_duration(h5)
	output_array["duration"]=duration	### ADDED VALUE TO ARRAY
	# number of bars
	bars=hdf5_getters.get_bars_start(h5)
	num_bars=len(bars)
	output_array["num_bars"]=num_bars	### ADDED VALUE TO ARRAY
	# mean and variance in bar length
	bar_length=numpy.ediff1d(bars)
	variance_bar_length=numpy.var(bar_length)
	output_array["variance_bar_length"]=variance_bar_length	### ADDED VALUE TO ARRAY
	# number of beats
	beats=hdf5_getters.get_beats_start(h5)
	num_beats=len(beats)
	output_array["num_beats"]=num_beats	### ADDED VALUE TO ARRAY
	# mean and variance in beats length
	beats_length=numpy.ediff1d(beats)
	variance_beats_length=numpy.var(beats_length)
	output_array["variance_beats_length"]=variance_beats_length	### ADDED VALUE TO ARRAY
	# danceability
	danceability=hdf5_getters.get_danceability(h5)
	output_array["danceability"]=danceability	### ADDED VALUE TO ARRAY
	# end of fade in
	end_of_fade_in=hdf5_getters.get_end_of_fade_in(h5)
	output_array["end_of_fade_in"]=end_of_fade_in	### ADDED VALUE TO ARRAY
	# energy
	energy=hdf5_getters.get_energy(h5)
	output_array["energy"]=energy	### ADDED VALUE TO ARRAY
	# key
	key=hdf5_getters.get_key(h5)
	output_array["key"]=int(key)	### ADDED VALUE TO ARRAY
	# loudness
	loudness=hdf5_getters.get_loudness(h5)
	output_array["loudness"]=loudness	### ADDED VALUE TO ARRAY
	# mode
	mode=hdf5_getters.get_mode(h5)
	output_array["mode"]=int(mode)	### ADDED VALUE TO ARRAY
	# number sections
	sections=hdf5_getters.get_sections_start(h5)
	num_sections=len(sections)
	output_array["num_sections"]=num_sections	### ADDED VALUE TO ARRAY
	# mean and variance in sections length
	sections_length=numpy.ediff1d(sections)
	variance_sections_length=numpy.var(sections_length)
	output_array["variance_sections_length"]=variance_sections_length	### ADDED VALUE TO ARRAY
	# number segments
	segments=hdf5_getters.get_segments_start(h5)
	num_segments=len(segments)
	output_array["num_segments"]=num_segments	### ADDED VALUE TO ARRAY
	# mean and variance in segments length
	segments_length=numpy.ediff1d(segments)
	variance_segments_length=numpy.var(segments_length)
	output_array["variance_segments_length"]=variance_segments_length	### ADDED VALUE TO ARRAY
	# segment loudness max
	segment_loudness_max_array=hdf5_getters.get_segments_loudness_max(h5)
	segment_loudness_max_time_array=hdf5_getters.get_segments_loudness_max_time(h5)
	segment_loudness_max_index=0
	for i in range(len(segment_loudness_max_array)):
		if segment_loudness_max_array[i]>segment_loudness_max_array[segment_loudness_max_index]:
			segment_loudness_max_index=i
	segment_loudness_max=segment_loudness_max_array[segment_loudness_max_index]
	segment_loudness_max_time=segment_loudness_max_time_array[segment_loudness_max_index]
	output_array["segment_loudness_max"]=segment_loudness_max	### ADDED VALUE TO ARRAY
	output_array["segment_loudness_time"]=segment_loudness_max_time	### ADDED VALUE TO ARRAY
			
	# POSSIBLE TODO: use average function instead and weight by segment length
	# segment loudness mean (start)
	segment_loudness_array=hdf5_getters.get_segments_loudness_start(h5)
	segment_loudness_mean=numpy.mean(segment_loudness_array)
	output_array["segment_loudness_mean"]=segment_loudness_mean	### ADDED VALUE TO ARRAY
	# segment loudness variance (start)
	segment_loudness_variance=numpy.var(segment_loudness_array)
	output_array["segment_loudness_variance"]=segment_loudness_variance	### ADDED VALUE TO ARRAY
	# segment pitches
	segment_pitches_array=hdf5_getters.get_segments_pitches(h5)
	segment_pitches_mean=numpy.mean(segment_pitches_array,axis=0).tolist()
	output_array["segment_pitches_mean"]=segment_pitches_mean
	# segment pitches variance (start)
	segment_pitches_variance=numpy.var(segment_pitches_array,axis=0).tolist()
	output_array["segment_pitches_variance"]=segment_pitches_variance
	# segment timbres
	segment_timbres_array=hdf5_getters.get_segments_timbre(h5)
	segment_timbres_mean=numpy.mean(segment_timbres_array,axis=0).tolist()
	output_array["segment_timbres_mean"]=segment_timbres_mean
	# segment timbres variance (start)
	segment_timbres_variance=numpy.var(segment_timbres_array,axis=0).tolist()
	output_array["segment_timbres_variance"]=segment_timbres_variance
	# hotttnesss
	hottness=hdf5_getters.get_song_hotttnesss(h5,0)
	output_array["hottness"]=hottness	### ADDED VALUE TO ARRAY
	# duration-start of fade out
	start_of_fade_out=hdf5_getters.get_start_of_fade_out(h5)
	fade_out=duration-start_of_fade_out
	output_array["fade_out"]=fade_out	### ADDED VALUE TO ARRAY
	# tatums
	tatums=hdf5_getters.get_tatums_start(h5)
	num_tatums=len(tatums)
	output_array["num_tatums"]=num_tatums	### ADDED VALUE TO ARRAY
	# mean and variance in tatums length
	tatums_length=numpy.ediff1d(tatums)
	variance_tatums_length=numpy.var(tatums_length)
	output_array["variance_tatums_length"]=variance_tatums_length	### ADDED VALUE TO ARRAY
	# tempo
	tempo=hdf5_getters.get_tempo(h5)
	output_array["tempo"]=tempo	### ADDED VALUE TO ARRAY
	# time signature
	time_signature=hdf5_getters.get_time_signature(h5)
	output_array["time_signature"]=int(time_signature)	### ADDED VALUE TO ARRAY
	# year
	year=hdf5_getters.get_year(h5)
	output_array["year"]=int(year)	### ADDED VALUE TO ARRAY
	# artist terms
	artist_terms=hdf5_getters.get_artist_terms(h5,0)
	output_array["artist_terms"]=artist_terms.tolist()
	artist_terms_freq=hdf5_getters.get_artist_terms_freq(h5,0)
	output_array["artist_terms_freq"]=artist_terms_freq.tolist()
	artist_name=hdf5_getters.get_artist_name(h5,0)
	output_array["artist_name"]=artist_name
	artist_id=hdf5_getters.get_artist_id(h5,0)
	output_array["artist_id"]=artist_id
	# title
	title=hdf5_getters.get_title(h5,0)
	output_array["title"]=title

	return output_array
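
# A minimal usage sketch for classify(); the wrapper function and the file path
# below are illustrative assumptions, not part of the original code, and assume
# hdf5_getters is imported as in the function above.
def classify_one_file(path):
	h5 = hdf5_getters.open_h5_file_read(path)
	try:
		return classify(h5)
	finally:
		h5.close()

# feats = classify_one_file('/path/to/some_track.h5')
# print feats['tempo'], feats['num_beats'], feats['variance_beats_length']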
Exemple #35
0
def data_to_flat_file(basedir, ext='.h5'):
    """This function extracts the information from the tables and creates the flat file."""
    count = 0;	#song counter
    list_to_write= []
    row_to_write = ""
    writer = csv.writer(open("metadata.csv", "wb"))
    for root, dirs, files in os.walk(basedir):
	files = glob.glob(os.path.join(root,'*'+ext))
        for f in files:
	    print f	#the name of the file
            h5 = hdf5_getters.open_h5_file_read(f)
	    title = hdf5_getters.get_title(h5) 
	    title= title.replace('"','') 
	    comma=title.find(',')	#eliminating commas in the title
	    if	comma != -1:
		    print title
		    time.sleep(1)
	    album = hdf5_getters.get_release(h5)
	    album= album.replace('"','')	#eliminating commas in the album	
	    comma=album.find(',')
	    if	comma != -1:
		    print album
		    time.sleep(1)
	    artist_name = hdf5_getters.get_artist_name(h5)
	    comma=artist_name.find(',')
	    if	comma != -1:
		    print artist_name
		    time.sleep(1)
	    artist_name= artist_name.replace('"','')	#eliminating double quotes
	    duration = hdf5_getters.get_duration(h5)
	    samp_rt = hdf5_getters.get_analysis_sample_rate(h5)
	    artist_7digitalid = hdf5_getters.get_artist_7digitalid(h5)
	    artist_fam = hdf5_getters.get_artist_familiarity(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_fam) == True:
	            artist_fam=-1
	    artist_hotness= hdf5_getters.get_artist_hotttnesss(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_hotness) == True:
	            artist_hotness=-1
	    artist_id = hdf5_getters.get_artist_id(h5)
	    artist_lat = hdf5_getters.get_artist_latitude(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_lat) == True:
	            artist_lat=-1
	    artist_loc = hdf5_getters.get_artist_location(h5)
		#checks artist_loc to see if it is a hyperlink if it is set as empty string
	    artist_loc = artist_loc.replace(",", "\,");
	    if artist_loc.startswith("<a"):
                artist_loc = ""
	    if len(artist_loc) > 100:
                artist_loc = ""
	    artist_lon = hdf5_getters.get_artist_longitude(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_lon) == True:
	            artist_lon=-1
	    artist_mbid = hdf5_getters.get_artist_mbid(h5)
	    artist_pmid = hdf5_getters.get_artist_playmeid(h5)
	    audio_md5 = hdf5_getters.get_audio_md5(h5)
	    danceability = hdf5_getters.get_danceability(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(danceability) == True:
	            danceability=-1
	    end_fade_in =hdf5_getters.get_end_of_fade_in(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(end_fade_in) == True:
	            end_fade_in=-1
	    energy = hdf5_getters.get_energy(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(energy) == True:
	            energy=-1
            song_key = hdf5_getters.get_key(h5)
	    key_c = hdf5_getters.get_key_confidence(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(key_c) == True:
	            key_c=-1
	    loudness = hdf5_getters.get_loudness(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(loudness) == True:
	            loudness=-1
	    mode = hdf5_getters.get_mode(h5)
	    mode_conf = hdf5_getters.get_mode_confidence(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(mode_conf) == True:
	            mode_conf=-1
	    release_7digitalid = hdf5_getters.get_release_7digitalid(h5)
	    song_hot = hdf5_getters.get_song_hotttnesss(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(song_hot) == True:
	            song_hot=-1
	    song_id = hdf5_getters.get_song_id(h5)
	    start_fade_out = hdf5_getters.get_start_of_fade_out(h5)
	    tempo = hdf5_getters.get_tempo(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(tempo) == True:
	            tempo=-1
	    time_sig = hdf5_getters.get_time_signature(h5)
	    time_sig_c = hdf5_getters.get_time_signature_confidence(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(time_sig_c) == True:
	            time_sig_c=-1
	    track_id = hdf5_getters.get_track_id(h5)
	    track_7digitalid = hdf5_getters.get_track_7digitalid(h5)
	    year = hdf5_getters.get_year(h5)
	    bars_c = hdf5_getters.get_bars_confidence(h5)
	    bars_c_avg= get_avg(bars_c)
	    bars_c_max= get_max(bars_c)
	    bars_c_min = get_min(bars_c)
	    bars_c_stddev= get_stddev(bars_c)
	    bars_c_count = get_count(bars_c)
	    bars_c_sum = get_sum(bars_c)
	    bars_start = hdf5_getters.get_bars_start(h5)
	    bars_start_avg = get_avg(bars_start)
	    bars_start_max= get_max(bars_start)
	    bars_start_min = get_min(bars_start)
	    bars_start_stddev= get_stddev(bars_start)
	    bars_start_count = get_count(bars_start)
	    bars_start_sum = get_sum(bars_start)
            beats_c = hdf5_getters.get_beats_confidence(h5)
            beats_c_avg= get_avg(beats_c)
	    beats_c_max= get_max(beats_c)
	    beats_c_min = get_min(beats_c)
	    beats_c_stddev= get_stddev(beats_c)
	    beats_c_count = get_count(beats_c)
	    beats_c_sum = get_sum(beats_c)
            beats_start = hdf5_getters.get_beats_start(h5)
 	    beats_start_avg = get_avg(beats_start)
	    beats_start_max= get_max(beats_start)
	    beats_start_min = get_min(beats_start)
	    beats_start_stddev= get_stddev(beats_start)
	    beats_start_count = get_count(beats_start)
	    beats_start_sum = get_sum(beats_start)
	    sec_c = hdf5_getters.get_sections_confidence(h5)
            sec_c_avg= get_avg(sec_c)
	    sec_c_max= get_max(sec_c)
	    sec_c_min = get_min(sec_c)
	    sec_c_stddev= get_stddev(sec_c)
	    sec_c_count = get_count(sec_c)
	    sec_c_sum = get_sum(sec_c)
	    sec_start = hdf5_getters.get_sections_start(h5)
            sec_start_avg = get_avg(sec_start)
	    sec_start_max= get_max(sec_start)
	    sec_start_min = get_min(sec_start)
	    sec_start_stddev= get_stddev(sec_start)
	    sec_start_count = get_count(sec_start)
	    sec_start_sum = get_sum(sec_start)
	    seg_c = hdf5_getters.get_segments_confidence(h5)
	    seg_c_avg= get_avg(seg_c)
	    seg_c_max= get_max(seg_c)
	    seg_c_min = get_min(seg_c)
	    seg_c_stddev= get_stddev(seg_c)
	    seg_c_count = get_count(seg_c)
	    seg_c_sum = get_sum(seg_c)
            seg_loud_max = hdf5_getters.get_segments_loudness_max(h5)
            seg_loud_max_avg= get_avg(seg_loud_max)
	    seg_loud_max_max= get_max(seg_loud_max)
	    seg_loud_max_min = get_min(seg_loud_max)
	    seg_loud_max_stddev= get_stddev(seg_loud_max)
	    seg_loud_max_count = get_count(seg_loud_max)
	    seg_loud_max_sum = get_sum(seg_loud_max)
	    seg_loud_max_time = hdf5_getters.get_segments_loudness_max_time(h5)
	    seg_loud_max_time_avg= get_avg(seg_loud_max_time)
	    seg_loud_max_time_max= get_max(seg_loud_max_time)
	    seg_loud_max_time_min = get_min(seg_loud_max_time)
	    seg_loud_max_time_stddev= get_stddev(seg_loud_max_time)
	    seg_loud_max_time_count = get_count(seg_loud_max_time)
	    seg_loud_max_time_sum = get_sum(seg_loud_max_time)
	    seg_loud_start = hdf5_getters.get_segments_loudness_start(h5)
	    seg_loud_start_avg= get_avg(seg_loud_start)
	    seg_loud_start_max= get_max(seg_loud_start)
	    seg_loud_start_min = get_min(seg_loud_start)
	    seg_loud_start_stddev= get_stddev(seg_loud_start)
	    seg_loud_start_count = get_count(seg_loud_start)
	    seg_loud_start_sum = get_sum(seg_loud_start)					      
	    seg_pitch = hdf5_getters.get_segments_pitches(h5)
	    pitch_size = len(seg_pitch)
	    seg_start = hdf5_getters.get_segments_start(h5)
	    seg_start_avg= get_avg(seg_start)
	    seg_start_max= get_max(seg_start)
	    seg_start_min = get_min(seg_start)
	    seg_start_stddev= get_stddev(seg_start)
	    seg_start_count = get_count(seg_start)
	    seg_start_sum = get_sum(seg_start)
	    seg_timbre = hdf5_getters.get_segments_timbre(h5)
	    tatms_c = hdf5_getters.get_tatums_confidence(h5)
	    tatms_c_avg= get_avg(tatms_c)
	    tatms_c_max= get_max(tatms_c)
	    tatms_c_min = get_min(tatms_c)
	    tatms_c_stddev= get_stddev(tatms_c)
	    tatms_c_count = get_count(tatms_c)
	    tatms_c_sum = get_sum(tatms_c)
	    tatms_start = hdf5_getters.get_tatums_start(h5)
	    tatms_start_avg= get_avg(tatms_start)
	    tatms_start_max= get_max(tatms_start)
	    tatms_start_min = get_min(tatms_start)
	    tatms_start_stddev= get_stddev(tatms_start)
	    tatms_start_count = get_count(tatms_start)
	    tatms_start_sum = get_sum(tatms_start)
	
	    #Getting the genres
	    genre_set = 0    #flag to see if the genre has been set or not
	    art_trm = hdf5_getters.get_artist_terms(h5)
	    trm_freq = hdf5_getters.get_artist_terms_freq(h5)
	    trn_wght = hdf5_getters.get_artist_terms_weight(h5)
	    a_mb_tags = hdf5_getters.get_artist_mbtags(h5)
	    genre_indexes=get_genre_indexes(trm_freq) #index of the highest freq
	    final_genre=[]
	    genres_so_far=[]
	    for i in range(len(genre_indexes)):
		    genre_tmp=get_genre(art_trm,genre_indexes[i])   #genre that corresponds to the highest freq
		    genres_so_far=genre_dict.get_genre_in_dict(genre_tmp) #getting the genre from the dictionary
		    if len(genres_so_far) != 0:
			    for i in genres_so_far:
				final_genre.append(i)
				genre_set=1				#genre was found in dictionary
				  
		
	    
	    if genre_set == 1:
		    col_num=[]
		   
		    for genre in final_genre:
			    column=int(genre)				#getting the column number of the genre
			    col_num.append(column)

		    genre_array=genre_columns(col_num)	         #genre array
 	    else:
		    genre_array=genre_columns(-1)		#the genre was not found in the dictionary

	    transpose_pitch= seg_pitch.transpose() #this is to transpose the matrix, so we can have 12 rows
	    #arrays containing the aggregate values of the 12 rows
	    seg_pitch_avg=[]
	    seg_pitch_max=[]
	    seg_pitch_min=[]
            seg_pitch_stddev=[]
            seg_pitch_count=[]
	    seg_pitch_sum=[]
            i=0
	    #Getting the aggregate values in the pitches array
	    for row in transpose_pitch:
		   seg_pitch_avg.append(get_avg(row))
		   seg_pitch_max.append(get_max(row))
	           seg_pitch_min.append(get_min(row))
		   seg_pitch_stddev.append(get_stddev(row))
		   seg_pitch_count.append(get_count(row))
                   seg_pitch_sum.append(get_sum(row))
		   i=i+1

	    #extracting information from the timbre array 
            transpose_timbre = seg_timbre.transpose() #transposing matrix, to have 12 rows
	    #arrays containing the aggregate values of the 12 rows
	    seg_timbre_avg=[]
	    seg_timbre_max=[]
	    seg_timbre_min=[]
            seg_timbre_stddev=[]
            seg_timbre_count=[]
	    seg_timbre_sum=[]
            i=0
	    for row in transpose_timbre:
		   seg_timbre_avg.append(get_avg(row))
		   seg_timbre_max.append(get_max(row))
	           seg_timbre_min.append(get_min(row))
		   seg_timbre_stddev.append(get_stddev(row))
		   seg_timbre_count.append(get_count(row))
                   seg_timbre_sum.append(get_sum(row))
		   i=i+1
		


		#Writing to the flat file

            writer.writerow([title,album,artist_name,duration,samp_rt,artist_7digitalid,artist_fam,artist_hotness,artist_id,artist_lat,artist_loc,artist_lon,artist_mbid,genre_array[0],genre_array[1],genre_array[2],
genre_array[3],genre_array[4],genre_array[5],genre_array[6],genre_array[7],genre_array[8],genre_array[9],genre_array[10],genre_array[11],genre_array[12],genre_array[13],genre_array[14],genre_array[15],
genre_array[16],genre_array[17],genre_array[18],genre_array[19],genre_array[20],genre_array[21],genre_array[22],genre_array[23],genre_array[24],genre_array[25],genre_array[26],
genre_array[27],genre_array[28],genre_array[29],genre_array[30],genre_array[31],genre_array[32],genre_array[33],genre_array[34],genre_array[35],genre_array[36],genre_array[37],genre_array[38],
genre_array[39],genre_array[40],genre_array[41],genre_array[42],genre_array[43],genre_array[44],genre_array[45],genre_array[46],genre_array[47],genre_array[48],genre_array[49],
genre_array[50],genre_array[51],genre_array[52],genre_array[53],genre_array[54],genre_array[55],genre_array[56],genre_array[57],genre_array[58],genre_array[59],
genre_array[60],genre_array[61],genre_array[62],genre_array[63],genre_array[64],genre_array[65],genre_array[66],genre_array[67],genre_array[68],genre_array[69],
genre_array[70],genre_array[71],genre_array[72],genre_array[73],genre_array[74],genre_array[75],genre_array[76],genre_array[77],genre_array[78],genre_array[79],
genre_array[80],genre_array[81],genre_array[82],genre_array[83],genre_array[84],genre_array[85],genre_array[86],genre_array[87],genre_array[88],genre_array[89],
genre_array[90],genre_array[91],genre_array[92],genre_array[93],genre_array[94],genre_array[95],genre_array[96],genre_array[97],genre_array[98],genre_array[99],genre_array[100],genre_array[101],
genre_array[102],genre_array[103],genre_array[104],genre_array[105],genre_array[106],genre_array[107],genre_array[108],genre_array[109],genre_array[110],genre_array[111],genre_array[112],
genre_array[113],genre_array[114],genre_array[115],genre_array[116],genre_array[117],genre_array[118],genre_array[119],genre_array[120],genre_array[121],genre_array[122],genre_array[123],
genre_array[124],genre_array[125],genre_array[126],genre_array[127],genre_array[128],genre_array[129],genre_array[130],genre_array[131],genre_array[132],
artist_pmid,audio_md5,danceability,end_fade_in,energy,song_key,key_c,loudness,mode,mode_conf,release_7digitalid,song_hot,song_id,start_fade_out,tempo,time_sig,time_sig_c,track_id,track_7digitalid,year,bars_c_avg,bars_c_max,bars_c_min,bars_c_stddev,bars_c_count,bars_c_sum,bars_start_avg,bars_start_max,bars_start_min,bars_start_stddev,bars_start_count,bars_start_sum,beats_c_avg,beats_c_max,beats_c_min,beats_c_stddev,beats_c_count,beats_c_sum,beats_start_avg,beats_start_max,beats_start_min, beats_start_stddev,beats_start_count,beats_start_sum, sec_c_avg,sec_c_max,sec_c_min,sec_c_stddev,sec_c_count,sec_c_sum,sec_start_avg,sec_start_max,sec_start_min,sec_start_stddev,sec_start_count,sec_start_sum,seg_c_avg,seg_c_max,seg_c_min,seg_c_stddev,seg_c_count,seg_c_sum,seg_loud_max_avg,seg_loud_max_max,seg_loud_max_min,seg_loud_max_stddev,seg_loud_max_count,seg_loud_max_sum,seg_loud_max_time_avg,seg_loud_max_time_max,seg_loud_max_time_min,seg_loud_max_time_stddev,seg_loud_max_time_count,seg_loud_max_time_sum,seg_loud_start_avg,seg_loud_start_max,seg_loud_start_min,seg_loud_start_stddev,seg_loud_start_count,seg_loud_start_sum,seg_pitch_avg[0],seg_pitch_max[0],seg_pitch_min[0],seg_pitch_stddev[0],seg_pitch_count[0],seg_pitch_sum[0],seg_pitch_avg[1],seg_pitch_max[1],seg_pitch_min[1],seg_pitch_stddev[1],seg_pitch_count[1],seg_pitch_sum[1],seg_pitch_avg[2],seg_pitch_max[2],seg_pitch_min[2],seg_pitch_stddev[2],seg_pitch_count[2],seg_pitch_sum[2],seg_pitch_avg[3],seg_pitch_max[3],seg_pitch_min[3],seg_pitch_stddev[3],seg_pitch_count[3],seg_pitch_sum[3],seg_pitch_avg[4],seg_pitch_max[4],seg_pitch_min[4],seg_pitch_stddev[4],seg_pitch_count[4],seg_pitch_sum[4],seg_pitch_avg[5],seg_pitch_max[5],seg_pitch_min[5],seg_pitch_stddev[5],seg_pitch_count[5],seg_pitch_sum[5],seg_pitch_avg[6],seg_pitch_max[6],seg_pitch_min[6],seg_pitch_stddev[6],seg_pitch_count[6],seg_pitch_sum[6],seg_pitch_avg[7],seg_pitch_max[7],seg_pitch_min[7],seg_pitch_stddev[7],seg_pitch_count[7],seg_pitch_sum[7],seg_pitch_avg[8],seg_pitch_max[8],seg_pitch_min[8],seg_pitch_stddev[8],seg_pitch_count[8],seg_pitch_sum[8],seg_pitch_avg[9],seg_pitch_max[9],seg_pitch_min[9],seg_pitch_stddev[9],seg_pitch_count[9],seg_pitch_sum[9],seg_pitch_avg[10],seg_pitch_max[10],seg_pitch_min[10],seg_pitch_stddev[10],seg_pitch_count[10],seg_pitch_sum[10],seg_pitch_avg[11],seg_pitch_max[11],seg_pitch_min[11],
seg_pitch_stddev[11],seg_pitch_count[11],seg_pitch_sum[11],seg_start_avg,seg_start_max,seg_start_min,seg_start_stddev, 
seg_start_count,seg_start_sum,seg_timbre_avg[0],seg_timbre_max[0],seg_timbre_min[0],seg_timbre_stddev[0],seg_timbre_count[0],
seg_timbre_sum[0],seg_timbre_avg[1],seg_timbre_max[1],seg_timbre_min[1],seg_timbre_stddev[1],seg_timbre_count[1],
seg_timbre_sum[1],seg_timbre_avg[2],seg_timbre_max[2],seg_timbre_min[2],seg_timbre_stddev[2],seg_timbre_count[2],
seg_timbre_sum[2],seg_timbre_avg[3],seg_timbre_max[3],seg_timbre_min[3],seg_timbre_stddev[3],seg_timbre_count[3],
seg_timbre_sum[3],seg_timbre_avg[4],seg_timbre_max[4],seg_timbre_min[4],seg_timbre_stddev[4],seg_timbre_count[4],
seg_timbre_sum[4],seg_timbre_avg[5],seg_timbre_max[5],seg_timbre_min[5],seg_timbre_stddev[5],seg_timbre_count[5],
seg_timbre_sum[5],seg_timbre_avg[6],seg_timbre_max[6],seg_timbre_min[6],seg_timbre_stddev[6],seg_timbre_count[6],
seg_timbre_sum[6],seg_timbre_avg[7],seg_timbre_max[7],seg_timbre_min[7],seg_timbre_stddev[7],seg_timbre_count[7],
seg_timbre_sum[7],seg_timbre_avg[8],seg_timbre_max[8],seg_timbre_min[8],seg_timbre_stddev[8],seg_timbre_count[8],
seg_timbre_sum[8],seg_timbre_avg[9],seg_timbre_max[9],seg_timbre_min[9],seg_timbre_stddev[9],seg_timbre_count[9],
seg_timbre_sum[9],seg_timbre_avg[10],seg_timbre_max[10],seg_timbre_min[10],seg_timbre_stddev[10],seg_timbre_count[10],
seg_timbre_sum[10],seg_timbre_avg[11],seg_timbre_max[11],seg_timbre_min[11],seg_timbre_stddev[11],seg_timbre_count[11],
seg_timbre_sum[11],tatms_c_avg,tatms_c_max,tatms_c_min,tatms_c_stddev,tatms_c_count,tatms_c_sum,tatms_start_avg,tatms_start_max,tatms_start_min,tatms_start_stddev,tatms_start_count,tatms_start_sum])






	    h5.close()
	    count=count+1;
	    print count;
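
# The aggregate helpers used above (get_avg, get_max, get_min, get_stddev,
# get_count, get_sum) are defined elsewhere and are not shown in this excerpt;
# the definitions below are a plausible reconstruction that guards against
# empty arrays, not the original helpers. get_genre_indexes, get_genre,
# genre_columns and genre_dict are likewise external and are not reconstructed.
def get_avg(arr):
	return numpy.mean(arr) if len(arr) > 0 else 0

def get_max(arr):
	return numpy.max(arr) if len(arr) > 0 else 0

def get_min(arr):
	return numpy.min(arr) if len(arr) > 0 else 0

def get_stddev(arr):
	return numpy.std(arr) if len(arr) > 0 else 0

def get_count(arr):
	return len(arr)

def get_sum(arr):
	return numpy.sum(arr) if len(arr) > 0 else 0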
def data_to_flat_file(basedir, ext='.h5'):
    """This function extract the information from the tables and creates the flat file."""
    count = 0
    #song counter
    list_to_write = []
    row_to_write = ""
    writer = csv.writer(open("metadata_wholeA.csv", "wb"))
    for root, dirs, files in os.walk(basedir):
        files = glob.glob(os.path.join(root, '*' + ext))
        for f in files:
            print f  #the name of the file
            h5 = hdf5_getters.open_h5_file_read(f)
            title = hdf5_getters.get_title(h5)
            title = title.replace('"', '')
            comma = title.find(',')  #eliminating commas in the title
            if comma != -1:
                print title
                time.sleep(1)
            album = hdf5_getters.get_release(h5)
            album = album.replace('"', '')  #eliminating commas in the album
            comma = album.find(',')
            if comma != -1:
                print album
                time.sleep(1)
            artist_name = hdf5_getters.get_artist_name(h5)
            comma = artist_name.find(',')
            if comma != -1:
                print artist_name
                time.sleep(1)
            artist_name = artist_name.replace('"',
                                              '')  #eliminating double quotes
            duration = hdf5_getters.get_duration(h5)
            samp_rt = hdf5_getters.get_analysis_sample_rate(h5)
            artist_7digitalid = hdf5_getters.get_artist_7digitalid(h5)
            artist_fam = hdf5_getters.get_artist_familiarity(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_fam) == True:
                artist_fam = -1
            artist_hotness = hdf5_getters.get_artist_hotttnesss(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_hotness) == True:
                artist_hotness = -1
            artist_id = hdf5_getters.get_artist_id(h5)
            artist_lat = hdf5_getters.get_artist_latitude(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_lat) == True:
                artist_lat = -1
            artist_loc = hdf5_getters.get_artist_location(h5)
            #checks artist_loc to see if it is a hyperlink if it is set as empty string
            artist_loc = artist_loc.replace(",", "\,")
            if artist_loc.startswith("<a"):
                artist_loc = ""
            if len(artist_loc) > 100:
                artist_loc = ""
            artist_lon = hdf5_getters.get_artist_longitude(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(artist_lon) == True:
                artist_lon = -1
            artist_mbid = hdf5_getters.get_artist_mbid(h5)
            artist_pmid = hdf5_getters.get_artist_playmeid(h5)
            audio_md5 = hdf5_getters.get_audio_md5(h5)
            danceability = hdf5_getters.get_danceability(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(danceability) == True:
                danceability = -1
            end_fade_in = hdf5_getters.get_end_of_fade_in(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(end_fade_in) == True:
                end_fade_in = -1
            energy = hdf5_getters.get_energy(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(energy) == True:
                energy = -1
            song_key = hdf5_getters.get_key(h5)
            key_c = hdf5_getters.get_key_confidence(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(key_c) == True:
                key_c = -1
            loudness = hdf5_getters.get_loudness(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(loudness) == True:
                loudness = -1
            mode = hdf5_getters.get_mode(h5)
            mode_conf = hdf5_getters.get_mode_confidence(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(mode_conf) == True:
                mode_conf = -1
            release_7digitalid = hdf5_getters.get_release_7digitalid(h5)
            song_hot = hdf5_getters.get_song_hotttnesss(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(song_hot) == True:
                song_hot = -1
            song_id = hdf5_getters.get_song_id(h5)
            start_fade_out = hdf5_getters.get_start_of_fade_out(h5)
            tempo = hdf5_getters.get_tempo(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(tempo) == True:
                tempo = -1
            time_sig = hdf5_getters.get_time_signature(h5)
            time_sig_c = hdf5_getters.get_time_signature_confidence(h5)
            #checking if we get a "nan" if we do we change it to -1
            if numpy.isnan(time_sig_c) == True:
                time_sig_c = -1
            track_id = hdf5_getters.get_track_id(h5)
            track_7digitalid = hdf5_getters.get_track_7digitalid(h5)
            year = hdf5_getters.get_year(h5)
            bars_c = hdf5_getters.get_bars_confidence(h5)
            bars_c_avg = get_avg(bars_c)
            bars_c_max = get_max(bars_c)
            bars_c_min = get_min(bars_c)
            bars_c_stddev = get_stddev(bars_c)
            bars_c_count = get_count(bars_c)
            bars_c_sum = get_sum(bars_c)
            bars_start = hdf5_getters.get_bars_start(h5)
            bars_start_avg = get_avg(bars_start)
            bars_start_max = get_max(bars_start)
            bars_start_min = get_min(bars_start)
            bars_start_stddev = get_stddev(bars_start)
            bars_start_count = get_count(bars_start)
            bars_start_sum = get_sum(bars_start)
            beats_c = hdf5_getters.get_beats_confidence(h5)
            beats_c_avg = get_avg(beats_c)
            beats_c_max = get_max(beats_c)
            beats_c_min = get_min(beats_c)
            beats_c_stddev = get_stddev(beats_c)
            beats_c_count = get_count(beats_c)
            beats_c_sum = get_sum(beats_c)
            beats_start = hdf5_getters.get_beats_start(h5)
            beats_start_avg = get_avg(beats_start)
            beats_start_max = get_max(beats_start)
            beats_start_min = get_min(beats_start)
            beats_start_stddev = get_stddev(beats_start)
            beats_start_count = get_count(beats_start)
            beats_start_sum = get_sum(beats_start)
            sec_c = hdf5_getters.get_sections_confidence(h5)
            sec_c_avg = get_avg(sec_c)
            sec_c_max = get_max(sec_c)
            sec_c_min = get_min(sec_c)
            sec_c_stddev = get_stddev(sec_c)
            sec_c_count = get_count(sec_c)
            sec_c_sum = get_sum(sec_c)
            sec_start = hdf5_getters.get_sections_start(h5)
            sec_start_avg = get_avg(sec_start)
            sec_start_max = get_max(sec_start)
            sec_start_min = get_min(sec_start)
            sec_start_stddev = get_stddev(sec_start)
            sec_start_count = get_count(sec_start)
            sec_start_sum = get_sum(sec_start)
            seg_c = hdf5_getters.get_segments_confidence(h5)
            seg_c_avg = get_avg(seg_c)
            seg_c_max = get_max(seg_c)
            seg_c_min = get_min(seg_c)
            seg_c_stddev = get_stddev(seg_c)
            seg_c_count = get_count(seg_c)
            seg_c_sum = get_sum(seg_c)
            seg_loud_max = hdf5_getters.get_segments_loudness_max(h5)
            seg_loud_max_avg = get_avg(seg_loud_max)
            seg_loud_max_max = get_max(seg_loud_max)
            seg_loud_max_min = get_min(seg_loud_max)
            seg_loud_max_stddev = get_stddev(seg_loud_max)
            seg_loud_max_count = get_count(seg_loud_max)
            seg_loud_max_sum = get_sum(seg_loud_max)
            seg_loud_max_time = hdf5_getters.get_segments_loudness_max_time(h5)
            seg_loud_max_time_avg = get_avg(seg_loud_max_time)
            seg_loud_max_time_max = get_max(seg_loud_max_time)
            seg_loud_max_time_min = get_min(seg_loud_max_time)
            seg_loud_max_time_stddev = get_stddev(seg_loud_max_time)
            seg_loud_max_time_count = get_count(seg_loud_max_time)
            seg_loud_max_time_sum = get_sum(seg_loud_max_time)
            seg_loud_start = hdf5_getters.get_segments_loudness_start(h5)
            seg_loud_start_avg = get_avg(seg_loud_start)
            seg_loud_start_max = get_max(seg_loud_start)
            seg_loud_start_min = get_min(seg_loud_start)
            seg_loud_start_stddev = get_stddev(seg_loud_start)
            seg_loud_start_count = get_count(seg_loud_start)
            seg_loud_start_sum = get_sum(seg_loud_start)
            seg_pitch = hdf5_getters.get_segments_pitches(h5)
            pitch_size = len(seg_pitch)
            seg_start = hdf5_getters.get_segments_start(h5)
            seg_start_avg = get_avg(seg_start)
            seg_start_max = get_max(seg_start)
            seg_start_min = get_min(seg_start)
            seg_start_stddev = get_stddev(seg_start)
            seg_start_count = get_count(seg_start)
            seg_start_sum = get_sum(seg_start)
            seg_timbre = hdf5_getters.get_segments_timbre(h5)
            tatms_c = hdf5_getters.get_tatums_confidence(h5)
            tatms_c_avg = get_avg(tatms_c)
            tatms_c_max = get_max(tatms_c)
            tatms_c_min = get_min(tatms_c)
            tatms_c_stddev = get_stddev(tatms_c)
            tatms_c_count = get_count(tatms_c)
            tatms_c_sum = get_sum(tatms_c)
            tatms_start = hdf5_getters.get_tatums_start(h5)
            tatms_start_avg = get_avg(tatms_start)
            tatms_start_max = get_max(tatms_start)
            tatms_start_min = get_min(tatms_start)
            tatms_start_stddev = get_stddev(tatms_start)
            tatms_start_count = get_count(tatms_start)
            tatms_start_sum = get_sum(tatms_start)

            #Getting the genres
            genre_set = 0  #flag to see if the genre has been set or not
            art_trm = hdf5_getters.get_artist_terms(h5)
            trm_freq = hdf5_getters.get_artist_terms_freq(h5)
            trn_wght = hdf5_getters.get_artist_terms_weight(h5)
            a_mb_tags = hdf5_getters.get_artist_mbtags(h5)
            genre_indexes = get_genre_indexes(
                trm_freq)  #index of the highest freq
            final_genre = []
            genres_so_far = []
            for i in range(len(genre_indexes)):
                genre_tmp = get_genre(
                    art_trm, genre_indexes[i]
                )  #genre that corresponds to the highest freq
                genres_so_far = genre_dict.get_genre_in_dict(
                    genre_tmp)  #getting the genre from the dictionary
                if len(genres_so_far) != 0:
                    for i in genres_so_far:
                        final_genre.append(i)
                        genre_set = 1  #genre was found in dictionary

            if genre_set == 1:
                col_num = []

                for genre in final_genre:
                    column = int(
                        genre)  #getting the column number of the genre
                    col_num.append(column)

                genre_array = genre_columns(col_num)  #genre array
            else:
                genre_array = genre_columns(
                    -1)  #the genre was not found in the dictionary

            transpose_pitch = seg_pitch.transpose()  #transpose the matrix, so we have 12 rows (one per pitch class)
            #arrays containing the aggregate values of the 12 rows
            seg_pitch_avg = []
            seg_pitch_max = []
            seg_pitch_min = []
            seg_pitch_stddev = []
            seg_pitch_count = []
            seg_pitch_sum = []
            i = 0
            #Getting the aggregate values in the pitches array
            for row in transpose_pitch:
                seg_pitch_avg.append(get_avg(row))
                seg_pitch_max.append(get_max(row))
                seg_pitch_min.append(get_min(row))
                seg_pitch_stddev.append(get_stddev(row))
                seg_pitch_count.append(get_count(row))
                seg_pitch_sum.append(get_sum(row))
                i = i + 1

            #extracting information from the timbre array
            transpose_timbre = seg_timbre.transpose()  #transposing matrix, to have 12 rows
            #arrays containing the aggregate values of the 12 rows
            seg_timbre_avg = []
            seg_timbre_max = []
            seg_timbre_min = []
            seg_timbre_stddev = []
            seg_timbre_count = []
            seg_timbre_sum = []
            i = 0
            for row in transpose_timbre:
                seg_timbre_avg.append(get_avg(row))
                seg_timbre_max.append(get_max(row))
                seg_timbre_min.append(get_min(row))
                seg_timbre_stddev.append(get_stddev(row))
                seg_timbre_count.append(get_count(row))
                seg_timbre_sum.append(get_sum(row))
                i = i + 1

            #Writing to the flat file
            writer.writerow([
                title, album, artist_name, year, duration, seg_start_count,
                tempo
            ])

            h5.close()
            count = count + 1
            print count
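
The aggregate helpers used by the snippet above (get_avg, get_max, get_min, get_stddev, get_count, get_sum) are not included in these examples. Below is a minimal sketch of what they are assumed to do, written against NumPy arrays; the -1 fallback mirrors the "missing value" convention used elsewhere in these snippets, and none of this is the original implementation.

import numpy as np

def get_avg(arr):
    # mean of the values, or -1 for an empty array
    return float(np.mean(arr)) if len(arr) > 0 else -1

def get_max(arr):
    return float(np.max(arr)) if len(arr) > 0 else -1

def get_min(arr):
    return float(np.min(arr)) if len(arr) > 0 else -1

def get_stddev(arr):
    return float(np.std(arr)) if len(arr) > 0 else -1

def get_count(arr):
    # number of elements
    return len(arr)

def get_sum(arr):
    return float(np.sum(arr)) if len(arr) > 0 else -1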
Exemple #38
0
        musicalKey = hdf5_getters.get_key(h5, songidx=row)
        mode = hdf5_getters.get_mode(h5, songidx=row)
        tempo = hdf5_getters.get_tempo(h5, songidx=row)
        time_signature = hdf5_getters.get_time_signature(h5, songidx=row)
        year = hdf5_getters.get_year(h5, songidx=row)
        song_hottness = hdf5_getters.get_song_hotttnesss(h5, songidx=row)
        end_of_fade_in = hdf5_getters.get_end_of_fade_in(h5, songidx=row)
        start_of_fade_out = hdf5_getters.get_start_of_fade_out(h5, songidx=row)

        #timestamp features
        #take the last start time and divide by the number of events to get the average spacing (seconds per bar/beat/section/tatum/segment)
        bars_start = hdf5_getters.get_bars_start(h5, songidx=row)
        beats_start = hdf5_getters.get_beats_start(h5, songidx=row)
        sections_start = hdf5_getters.get_sections_start(h5, songidx=row)
        tatums_start = hdf5_getters.get_tatums_start(h5, songidx=row)
        segments_start = hdf5_getters.get_segments_start(h5, songidx=row)
        if len(bars_start) == 0: bars_start = 0.
        else: bars_start = bars_start[-1] / len(bars_start)
        if len(beats_start) == 0: beats_start = 0.
        else: beats_start = beats_start[-1] / len(beats_start)
        if len(sections_start) == 0: sections_start = 0.
        else: sections_start = sections_start[-1] / len(sections_start)
        if len(tatums_start) == 0: tatums_start = 0.
        else: tatums_start = tatums_start[-1] / len(tatums_start)
        if len(segments_start) == 0: segments_start = 0.
        else: segments_start = segments_start[-1] / len(segments_start)

        #time series features
        #take mean
        max_loudness_time = hdf5_getters.get_segments_loudness_max_time(
            h5, songidx=row)
            song.keySignatureConfidence = remove_trap_characters(
                str(hdf5_getters.get_key_confidence(songH5File)))
            song.tempo = remove_trap_characters(
                str(hdf5_getters.get_tempo(songH5File)))
            song.timeSignature = remove_trap_characters(
                str(hdf5_getters.get_time_signature(songH5File)))
            song.timeSignatureConfidence = remove_trap_characters(
                str(hdf5_getters.get_time_signature_confidence(songH5File)))
            song.title = remove_trap_characters(
                str(hdf5_getters.get_title(songH5File)))
            song.year = remove_trap_characters(
                str(hdf5_getters.get_year(songH5File)))
            song.trackID = remove_trap_characters(
                str(hdf5_getters.get_track_id(songH5File)))

            temp = hdf5_getters.get_segments_start(songH5File)
            song.segmentsStart = remove_trap_characters(str(list(temp)))
            song.segmentsCount = get_list_length(temp)
            song.segmentsConfidence = \
                remove_trap_characters(str(list(hdf5_getters.get_segments_confidence(songH5File))))
            # song.segmentsPitches = remove_trap_characters(str(list(hdf5_getters.get_segments_pitches(songH5File))))
            song.segmentsPitches = \
                remove_trap_characters(str(parse_nested_list(hdf5_getters.get_segments_pitches(songH5File))))
            # song.segmentsTimbre = remove_trap_characters(str(list(hdf5_getters.get_segments_timbre(songH5File))))
            song.segmentsTimbre = \
                remove_trap_characters(str(parse_nested_list(hdf5_getters.get_segments_timbre(songH5File))))
            song.segmentsLoudnessMax = \
                remove_trap_characters(str(list(hdf5_getters.get_segments_loudness_max(songH5File))))
            song.segmentsLoudnessMaxTime = \
                remove_trap_characters(str(list(hdf5_getters.get_segments_loudness_max_time(songH5File))))
            song.segmentsLoudnessStart = \
Exemple #40
0
def convert_to_csv():
    i = 0

    data = []
    target = []
    count = 0


    with open('data_timbre.csv', 'w+') as f:
        with open('target_timbre.csv', 'w+') as f2:
            writer = csv.writer(f)
            target_writer = csv.writer(f2)

            for root, dirs, files in os.walk(msd_subset_data_path):
                files = glob.glob(os.path.join(root,'*.h5'))
                for fname in sorted(files):
                    try: # opening the file can raise; skip it if an exception is thrown
                        h5 = getter.open_h5_file_read(fname)
                        year = getter.get_year(h5)
                        if year:
                            count += 1

                            analysis_file = open('current_analysis_status.txt','a')
                            update = "Currently at file name: " + str(fname) + " and at number " + str(count) + "\n"
                            analysis_file.write(update)
                            print update
                            analysis_file.close()

                            target.append([year])
                            row = []
                            
                            timbre = getter.get_segments_timbre(h5)
                            segstarts = getter.get_segments_start(h5)
                            btstarts = getter.get_beats_start(h5)
                            duration = getter.get_duration(h5)
                            end_of_fade_in = getter.get_end_of_fade_in(h5)
                            key = getter.get_key(h5)
                            key_confidence = getter.get_key_confidence(h5)
                            loudness = getter.get_loudness(h5)
                            start_of_fade_out = getter.get_start_of_fade_out(h5)
                            tempo = getter.get_tempo(h5)
                            time_signature = getter.get_time_signature(h5)
                            time_signature_confidence = getter.get_time_signature_confidence(h5)

                            h5.close() # VERY IMPORTANT

                            segstarts = np.array(segstarts).flatten()
                            btstarts = np.array(btstarts).flatten()

                            bttimbre = align_feats(timbre.T, segstarts, btstarts, duration, end_of_fade_in, key, key_confidence, loudness, start_of_fade_out, tempo, time_signature, time_signature_confidence)

                            if bttimbre is None:
                                continue # Skip this track, some features broken

                            npicks, winsize, finaldim = 12, 12, 144  # Calculated by 12 * 12. 12 is fixed as number of dimensions.
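                            # Note (assumption): extract_and_compress is expected to cut npicks
                            # windows of winsize beats out of the 12 x nbeats beat-aligned timbre
                            # matrix and flatten each window to finaldim = 12 * winsize values,
                            # returning an (npicks, finaldim) array, or None if the track is too
                            # short or otherwise unusable.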
                            processed_feats = extract_and_compress(bttimbre, npicks, winsize, finaldim)
                            if processed_feats is None:
                                continue # Skip this track, some features broken

                            row = processed_feats.flatten()
                            if len(row) != 12*144: # npicks picks * finaldim features per pick
                                continue # Not enough features

                            year_row = np.array([year])

                            if row.any() and year_row.any():
                                writer.writerow(row)
                                target_writer.writerow(year_row)

                            i+=1

                        else:
                            h5.close()

                    except Exception:
                        pass



    print 'Finished!'

    analysis_file = open('current_analysis_status.txt','a')
    analysis_file.write('Done!')
    analysis_file.close()

    return 
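
Once convert_to_csv has finished, the two files can be loaded back for modelling. A small usage sketch; the file names come from the function above, everything else is illustrative:

import numpy as np

# each row of data_timbre.csv holds 12 * 144 = 1728 flattened timbre features,
# and the matching row of target_timbre.csv holds the release year
X = np.loadtxt('data_timbre.csv', delimiter=',')
y = np.loadtxt('target_timbre.csv', delimiter=',')
print X.shape, y.shape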
#from matplotlib.mlab import PCA
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import hdf5_getters as get
import numpy as np
#./A/A/A/TRAAAVO128F93133D4.h5
h5 = get.open_h5_file_read('../data/MillionSongSubset/data/B/I/I/TRBIIAL128F4252C04.h5')
segStart = get.get_segments_start(h5) 
pitch = np.array(get.get_segments_pitches(h5))
pitch_flatten = pitch.flatten()
#timbrePCA = PCA(timbre)

#timbrePCA.fracs

#print timbrePCA.Y
print pitch.shape
print len(segStart)
print len(pitch_flatten)
#print get.get_song_id(h5)



"""
timbre = get.get_segments_timbre(h5,0)
timbre.shape()
print timbre[:1]"""
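
The three prints above should agree with each other: get_segments_pitches returns one 12-dimensional chroma vector per segment, so pitch.shape is (number_of_segments, 12), len(segStart) equals the number of segments, and pitch_flatten has 12 times as many entries. A small check along those lines, reusing the same file as above:

import numpy as np
import hdf5_getters as get

h5 = get.open_h5_file_read('../data/MillionSongSubset/data/B/I/I/TRBIIAL128F4252C04.h5')
pitch = np.array(get.get_segments_pitches(h5))
segStart = get.get_segments_start(h5)
# one chroma vector per segment, 12 pitch classes per vector
assert pitch.shape == (len(segStart), 12)
assert len(pitch.flatten()) == 12 * len(segStart)
h5.close()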
    duration = hdf5_getters.get_duration(h5)
    time_of_fade_in = hdf5_getters.get_end_of_fade_in(h5)
    energy = hdf5_getters.get_energy(h5)
    key = hdf5_getters.get_key(h5)
    key_confidence = hdf5_getters.get_key_confidence(h5)
    loudness = hdf5_getters.get_loudness(h5)
    mode = hdf5_getters.get_mode(h5)
    mode_confidence = hdf5_getters.get_mode_confidence(h5)
    sections_start = hdf5_getters.get_sections_start(h5)
    num_sections = len(sections_start)
    if num_sections == 0:
        h5.close()
        continue
    segments_loudness_max = hdf5_getters.get_segments_loudness_max(h5)
    segments_loudness_start = hdf5_getters.get_segments_loudness_start(h5)
    num_segments = len(hdf5_getters.get_segments_start(h5))
    num_tatums = len(hdf5_getters.get_tatums_start(h5))
    time_of_fade_out = duration - hdf5_getters.get_start_of_fade_out(h5)
    tempo = hdf5_getters.get_tempo(h5)
    time_signature = hdf5_getters.get_time_signature(h5)
    time_signature_confidence = hdf5_getters.get_time_signature_confidence(h5)

    found_tracks += 1

    # Append all features to an array
    features = []
    features.append(track_id)
    features.append(danceability)
    features.append(duration)
    features.append(time_of_fade_in)
    features.append(energy)
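
The data_to_flat_file function below relies on several helpers that are not part of these snippets: padding, genre_columns, get_genre_indexes, get_genre and the genre_dict module. A minimal sketch of plausible implementations, under the assumption that padding produces blank filler cells and genre_columns produces a fixed-width 0/1 genre indicator; the names match the calls below, but the bodies (and NUM_GENRES) are assumptions, not the original code.

import numpy as np

NUM_GENRES = 22  # assumed width of the genre indicator block

def padding(n):
    # n empty cells, used to keep every CSV row the same width
    return [''] * n

def genre_columns(col_num):
    # 0/1 indicator list; col_num is a list of column indexes, or -1 when no genre matched
    cols = [0] * NUM_GENRES
    if col_num != -1:
        for c in col_num:
            cols[c] = 1
    return cols

def get_genre_indexes(trm_freq):
    # indexes of the artist terms with the highest frequency
    trm_freq = np.asarray(trm_freq)
    if len(trm_freq) == 0:
        return []
    return list(np.where(trm_freq == trm_freq.max())[0])

def get_genre(art_trm, index):
    # artist term (genre candidate) at the given index
    return art_trm[index]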
def data_to_flat_file(basedir, ext='.h5'):
    """ This function extracts the information from the tables and creates the flat file. """
    count = 0  #song counter
    list_to_write= []
    group_index=0
    row_to_write = ""
    writer = csv.writer(open("complete.csv", "wb"))
    for root, dirs, files in os.walk(basedir):
	files = glob.glob(os.path.join(root,'*'+ext))
        for f in files:
	    row=[]
	    print f
            h5 = hdf5_getters.open_h5_file_read(f)
	    title = hdf5_getters.get_title(h5) 
	    title= title.replace('"','') 
            row.append(title)
	    comma=title.find(',')
	    if	comma != -1:
		    print title
		    time.sleep(1)
	    album = hdf5_getters.get_release(h5)
	    album= album.replace('"','')
            row.append(album)
	    comma=album.find(',')
	    if	comma != -1:
		    print album
		    time.sleep(1)
	    artist_name = hdf5_getters.get_artist_name(h5)
	    comma=artist_name.find(',')
	    if	comma != -1:
		    print artist_name
		    time.sleep(1)
	    artist_name= artist_name.replace('"','')
            row.append(artist_name)
	    duration = hdf5_getters.get_duration(h5)
            row.append(duration)
	    samp_rt = hdf5_getters.get_analysis_sample_rate(h5)
            row.append(samp_rt)
	    artist_7digitalid = hdf5_getters.get_artist_7digitalid(h5)
            row.append(artist_7digitalid)
	    artist_fam = hdf5_getters.get_artist_familiarity(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_fam) == True:
	            artist_fam=-1
            row.append(artist_fam)
	    artist_hotness= hdf5_getters.get_artist_hotttnesss(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_hotness) == True:
	             artist_hotness=-1
            row.append(artist_hotness)
	    artist_id = hdf5_getters.get_artist_id(h5)
            row.append(artist_id)           
	    artist_lat = hdf5_getters.get_artist_latitude(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_lat) == True:
	            artist_lat=-1
            row.append(artist_lat)
	    artist_loc = hdf5_getters.get_artist_location(h5)
            row.append(artist_loc)
	    artist_lon = hdf5_getters.get_artist_longitude(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(artist_lon) == True:
	            artist_lon=-1
            row.append(artist_lon)
	    artist_mbid = hdf5_getters.get_artist_mbid(h5)
            row.append(artist_mbid)

	    #Getting the genre				       
            art_trm = hdf5_getters.get_artist_terms(h5)
            trm_freq = hdf5_getters.get_artist_terms_freq(h5)
	    trn_wght = hdf5_getters.get_artist_terms_weight(h5)
	    a_mb_tags = hdf5_getters.get_artist_mbtags(h5)
	    genre_indexes=get_genre_indexes(trm_freq) 		    #index of the highest freq
	    genre_set=0					            #flag to see if the genre has been set or not
	    final_genre=[]
	    genres_so_far=[]
	    for i in range(len(genre_indexes)):
		    genre_tmp=get_genre(art_trm,genre_indexes[i])   #genre that corresponds to the highest freq
		    genres_so_far=genre_dict.get_genre_in_dict(genre_tmp) #getting the genre from the dictionary
		    if len(genres_so_far) != 0:
			for i in genres_so_far:
				final_genre.append(i)
			    	genre_set=1
			
			
	    if genre_set == 1:
		col_num=[]
		for i in final_genre:
			column=int(i)				#getting the column number of the genre
			col_num.append(column)
	
		genre_array=genre_columns(col_num)	                #genre array 
	        for i in range(len(genre_array)):                   	#appending the genre_array to the row 
			row.append(genre_array[i])
	    else:
		genre_array=genre_columns(-1)				#when there is no genre matched, return an array of [0...0]
	        for i in range(len(genre_array)):                   	#appending the genre_array to the row 
			row.append(genre_array[i])
					

	    artist_pmid = hdf5_getters.get_artist_playmeid(h5)
            row.append(artist_pmid)
	    audio_md5 = hdf5_getters.get_audio_md5(h5)
            row.append(audio_md5)
	    danceability = hdf5_getters.get_danceability(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(danceability) == True:
	            danceability=-1
            row.append(danceability)
	    end_fade_in =hdf5_getters.get_end_of_fade_in(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(end_fade_in) == True:
	            end_fade_in=-1
            row.append(end_fade_in)
	    energy = hdf5_getters.get_energy(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(energy) == True:
	            energy=-1
            row.append(energy)
            song_key = hdf5_getters.get_key(h5)
            row.append(song_key)
	    key_c = hdf5_getters.get_key_confidence(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(key_c) == True:
	            key_c=-1
            row.append(key_c)
	    loudness = hdf5_getters.get_loudness(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(loudness) == True:
	            loudness=-1
            row.append(loudness)
	    mode = hdf5_getters.get_mode(h5)
            row.append(mode)
	    mode_conf = hdf5_getters.get_mode_confidence(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(mode_conf) == True:
	            mode_conf=-1
            row.append(mode_conf)
	    release_7digitalid = hdf5_getters.get_release_7digitalid(h5)
            row.append(release_7digitalid)
	    song_hot = hdf5_getters.get_song_hotttnesss(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(song_hot) == True:
	            song_hot=-1
            row.append(song_hot)
	    song_id = hdf5_getters.get_song_id(h5)
            row.append(song_id)
	    start_fade_out = hdf5_getters.get_start_of_fade_out(h5)
            row.append(start_fade_out)
	    tempo = hdf5_getters.get_tempo(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(tempo) == True:
	            tempo=-1
            row.append(tempo)
	    time_sig = hdf5_getters.get_time_signature(h5)
            row.append(time_sig)
	    time_sig_c = hdf5_getters.get_time_signature_confidence(h5)
	    #checking if we get a "nan" if we do we change it to -1
	    if numpy.isnan(time_sig_c) == True:
	            time_sig_c=-1
            row.append(time_sig_c)
	    track_id = hdf5_getters.get_track_id(h5)
            row.append(track_id)
	    track_7digitalid = hdf5_getters.get_track_7digitalid(h5)
            row.append(track_7digitalid)
	    year = hdf5_getters.get_year(h5)
            row.append(year)
	    bars_c = hdf5_getters.get_bars_confidence(h5)
            bars_start = hdf5_getters.get_bars_start(h5)
	    row_bars_padding=padding(245)   #this is the array that will be attached at the end of the row
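            # Row layout note: every row written below starts with the per-song fields
            # (gral_info), then a group_index identifying the feature group (0=bars,
            # 1=beats, 2=sections, 3=segments, 4=pitch/timbre, 5=tatums) and the event
            # index, followed by that group's columns at a fixed offset; padding() fills
            # the unused columns so every row of complete.csv has the same width.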

	    #--------------bars---------------"
	    gral_info=[]
	    gral_info=row[:]
	    empty=[]
	    for i,item in enumerate(bars_c):
                row.append(group_index)
                row.append(i)
                row.append(bars_c[i])
	        bars_c_avg= get_avg(bars_c)
                row.append(bars_c_avg)
	        bars_c_max= get_max(bars_c)	
                row.append(bars_c_max)
	        bars_c_min = get_min(bars_c)
                row.append(bars_c_min)
	        bars_c_stddev= get_stddev(bars_c)
                row.append(bars_c_stddev)
	        bars_c_count = get_count(bars_c)
                row.append(bars_c_count)
	        bars_c_sum = get_sum(bars_c)
                row.append(bars_c_sum)
                row.append(bars_start[i])	         
	        bars_start_avg = get_avg(bars_start)
                row.append(bars_start_avg)	         
	        bars_start_max= get_max(bars_start)
                row.append(bars_start_max)	         
	        bars_start_min = get_min(bars_start)
                row.append(bars_start_min)	         
	        bars_start_stddev= get_stddev(bars_start)
                row.append(bars_start_stddev)	         
	        bars_start_count = get_count(bars_start)
                row.append(bars_start_count)	         
	        bars_start_sum = get_sum(bars_start)
                row.append(bars_start_sum)	         
		for i in row_bars_padding:
			row.append(i)

                writer.writerow(row)
		row=[]
		row=gral_info[:]
	 

            #--------beats---------------"
	    beats_c = hdf5_getters.get_beats_confidence(h5)
	    group_index=1
	    row=[]
	    row=gral_info[:]
	    row_front=padding(14)  	#blanks left in front of the row(empty spaces for bars)
	    row_beats_padding=padding(231)
	    for i,item in enumerate(beats_c):
	   	row.append(group_index)
		row.append(i)
		for index in row_front:  #padding blanks in front of the beats
			row.append(index)
		
		row.append(beats_c[i])
	        beats_c_avg= get_avg(beats_c)
		row.append(beats_c_avg)
	        beats_c_max= get_max(beats_c)
		row.append(beats_c_max)
                beats_c_min = get_min(beats_c)
		row.append(beats_c_min)
	        beats_c_stddev= get_stddev(beats_c)
		row.append(beats_c_stddev)
	        beats_c_count = get_count(beats_c)
		row.append(beats_c_count)
	        beats_c_sum = get_sum(beats_c)
		row.append(beats_c_sum)
                beats_start = hdf5_getters.get_beats_start(h5)
		row.append(beats_start[i])
 	        beats_start_avg = get_avg(beats_start)
		row.append(beats_start_avg)
	        beats_start_max= get_max(beats_start)
		row.append(beats_start_max)
	        beats_start_min = get_min(beats_start)
		row.append(beats_start_min)
	        beats_start_stddev= get_stddev(beats_start)
		row.append(beats_start_stddev)
	        beats_start_count = get_count(beats_start)
		row.append(beats_start_count)
	        beats_start_sum = get_sum(beats_start)
		row.append(beats_start_sum)
		for i in row_beats_padding:
			row.append(i)
                
		writer.writerow(row)
		row=[]
		row=gral_info[:]

            # "--------sections---------------"
	    row_sec_padding=padding(217)	#blank spaces left at the end of the row
	    sec_c = hdf5_getters.get_sections_confidence(h5)
	    group_index=2
	    row=[]
	    row=gral_info[:]
	    row_front=padding(28)		#blank spaces left in front(empty spaces for bars,beats)
	    for i,item in enumerate(sec_c):
		row.append(group_index)
		row.append(i)
		for index in row_front:  	#padding blanks in front of the sections
			row.append(index)

		row.append(sec_c[i])
                sec_c_avg= get_avg(sec_c)
		row.append(sec_c_avg)
	        sec_c_max= get_max(sec_c)
		row.append(sec_c_max)
	        sec_c_min = get_min(sec_c)
		row.append(sec_c_min)
	        sec_c_stddev= get_stddev(sec_c)
		row.append(sec_c_stddev)
	        sec_c_count = get_count(sec_c)
		row.append(sec_c_count)
	        sec_c_sum = get_sum(sec_c)
		row.append(sec_c_sum)
	        sec_start = hdf5_getters.get_sections_start(h5)
		row.append(sec_start[i])	   
                sec_start_avg = get_avg(sec_start)
		row.append(sec_start_avg)
	        sec_start_max= get_max(sec_start)
		row.append(sec_start_max)
	        sec_start_min = get_min(sec_start)
		row.append(sec_start_min)
	        sec_start_stddev= get_stddev(sec_start)
		row.append(sec_start_stddev)
	        sec_start_count = get_count(sec_start)
		row.append(sec_start_count)
	        sec_start_sum = get_sum(sec_start)
		row.append(sec_start_sum)
		for i in row_sec_padding:	#appending the blank spaces at the end of the row
			row.append(i)
                

		writer.writerow(row)
		row=[]
		row=gral_info[:]


            #--------segments---------------"
	    row_seg_padding=padding(182)	#blank spaces at the end of the row
 	    row_front=padding(42)		#blank spaces left in front of segments
	    seg_c = hdf5_getters.get_segments_confidence(h5)
	    group_index=3
	    row=[]
	    row=gral_info[:]
	    for i,item in enumerate(seg_c):
		row.append(group_index)
		row.append(i)
		for index in row_front:  	#padding blanks in front of the segments
			row.append(index)

		row.append(seg_c[i])
                seg_c_avg= get_avg(seg_c)
		row.append(seg_c_avg)
	        seg_c_max= get_max(seg_c)
		row.append(seg_c_max)
	        seg_c_min = get_min(seg_c)
		row.append(seg_c_min)
	        seg_c_stddev= get_stddev(seg_c)
		row.append(seg_c_stddev)
	        seg_c_count = get_count(seg_c)
		row.append(seg_c_count)
	        seg_c_sum = get_sum(seg_c)
		row.append(seg_c_sum)
                seg_loud_max = hdf5_getters.get_segments_loudness_max(h5)
		row.append(seg_loud_max[i])
                seg_loud_max_avg= get_avg(seg_loud_max)
		row.append(seg_loud_max_avg)
	        seg_loud_max_max= get_max(seg_loud_max)
		row.append(seg_loud_max_max)
	        seg_loud_max_min = get_min(seg_loud_max)
		row.append(seg_loud_max_min)
	        seg_loud_max_stddev= get_stddev(seg_loud_max)
		row.append(seg_loud_max_stddev)
	        seg_loud_max_count = get_count(seg_loud_max)
		row.append(seg_loud_max_count)
	        seg_loud_max_sum = get_sum(seg_loud_max)
		row.append(seg_loud_max_sum)
	        seg_loud_max_time = hdf5_getters.get_segments_loudness_max_time(h5)
		row.append(seg_loud_max_time[i])
	        seg_loud_max_time_avg= get_avg(seg_loud_max_time)
		row.append(seg_loud_max_time_avg)
	        seg_loud_max_time_max= get_max(seg_loud_max_time)
		row.append(seg_loud_max_time_max)
	        seg_loud_max_time_min = get_min(seg_loud_max_time)
		row.append(seg_loud_max_time_min)
	        seg_loud_max_time_stddev= get_stddev(seg_loud_max_time)
		row.append(seg_loud_max_time_stddev)
	        seg_loud_max_time_count = get_count(seg_loud_max_time)
		row.append(seg_loud_max_time_count)
	        seg_loud_max_time_sum = get_sum(seg_loud_max_time)
		row.append(seg_loud_max_time_sum)
	        seg_loud_start = hdf5_getters.get_segments_loudness_start(h5)
		row.append(seg_loud_start[i])
	        seg_loud_start_avg= get_avg(seg_loud_start)
		row.append(seg_loud_start_avg)
	        seg_loud_start_max= get_max(seg_loud_start)
		row.append(seg_loud_start_max)
	        seg_loud_start_min = get_min(seg_loud_start)
		row.append(seg_loud_start_min)
	        seg_loud_start_stddev= get_stddev(seg_loud_start)
		row.append(seg_loud_start_stddev)
	        seg_loud_start_count = get_count(seg_loud_start)
		row.append(seg_loud_start_count)
	        seg_loud_start_sum = get_sum(seg_loud_start)					      
		row.append(seg_loud_start_sum)
	        seg_start = hdf5_getters.get_segments_start(h5)
		row.append(seg_start[i])
	        seg_start_avg= get_avg(seg_start)
		row.append(seg_start_avg)
	        seg_start_max= get_max(seg_start)
		row.append(seg_start_max)
	        seg_start_min = get_min(seg_start)
		row.append(seg_start_min)
	        seg_start_stddev= get_stddev(seg_start)
		row.append(seg_start_stddev)
	        seg_start_count = get_count(seg_start)
		row.append(seg_start_count)
	        seg_start_sum = get_sum(seg_start)
		row.append(seg_start_sum)
		for i in row_seg_padding:	#appending blank spaces at the end of the row
			row.append(i)
                
		writer.writerow(row)
		row=[]
		row=gral_info[:]

	    #----------segments pitch and timbre---------------"
	    row_seg2_padding=padding(14)	#blank spaces left at the end of the row
	    row_front=padding(77)		#blank spaces left at the front of the segments and timbre
	    seg_pitch = hdf5_getters.get_segments_pitches(h5)
	    transpose_pitch = seg_pitch.transpose()          #this is to transpose the matrix, so we can have 12 rows
	    group_index=4
	    row=[]
	    row=gral_info[:]
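            # For every segment i, the loop below appends, for each of the 12 chroma
            # dimensions and then each of the 12 timbre dimensions, the value at
            # segment i followed by six aggregates (avg, max, min, stddev, count, sum)
            # computed over the whole track.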
	    for i,item in enumerate(transpose_pitch[0]):
		row.append(group_index)
		row.append(i)
		for index in row_front:  	#padding blanks in front of segments and timbre
			row.append(index)
	   
		row.append(transpose_pitch[0][i])
  		seg_pitch_avg= get_avg(transpose_pitch[0])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[0])	
		row.append(seg_pitch_max)
		seg_pitch_min = get_min(transpose_pitch[0])
		row.append(seg_pitch_min)
		seg_pitch_stddev= get_stddev(transpose_pitch[0])
		row.append(seg_pitch_stddev)
		seg_pitch_count = get_count(transpose_pitch[0])
		row.append(seg_pitch_count)
		seg_pitch_sum = get_sum(transpose_pitch[0])
		row.append(seg_pitch_sum)   
 		row.append(transpose_pitch[1][i])
 		seg_pitch_avg= get_avg(transpose_pitch[1])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[1])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[1])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[1])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[1])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[1])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[2][i])
 		seg_pitch_avg= get_avg(transpose_pitch[2])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[2])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[2])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[2])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[2])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[2])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[3][i])
 		seg_pitch_avg= get_avg(transpose_pitch[3])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[3])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[3])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[3])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[3])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[3])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[4][i])
 		seg_pitch_avg= get_avg(transpose_pitch[4])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[4])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[4])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[4])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[4])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[4])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[5][i])
 		seg_pitch_avg= get_avg(transpose_pitch[5])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[5])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[5])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[5])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[5])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[5])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[6][i])
 		seg_pitch_avg= get_avg(transpose_pitch[6])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[6])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[6])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[6])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[6])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[6])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[7][i])
 		seg_pitch_avg= get_avg(transpose_pitch[7])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[7])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[7])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[7])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[7])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[7])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[8][i])
 		seg_pitch_avg= get_avg(transpose_pitch[8])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[8])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[8])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[8])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[8])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[8])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[9][i])
 		seg_pitch_avg= get_avg(transpose_pitch[9])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[9])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[9])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[9])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[9])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[9])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[10][i])
 		seg_pitch_avg= get_avg(transpose_pitch[10])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[10])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[10])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[10])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[10])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[10])
		row.append(seg_pitch_sum)   
		row.append(transpose_pitch[11][i])
 		seg_pitch_avg= get_avg(transpose_pitch[11])
		row.append(seg_pitch_avg)
		seg_pitch_max= get_max(transpose_pitch[11])	
		row.append(seg_pitch_max)
	        seg_pitch_min = get_min(transpose_pitch[11])
		row.append(seg_pitch_min)
	        seg_pitch_stddev= get_stddev(transpose_pitch[11])
		row.append(seg_pitch_stddev)
	        seg_pitch_count = get_count(transpose_pitch[11])
		row.append(seg_pitch_count)
	        seg_pitch_sum = get_sum(transpose_pitch[11])
		row.append(seg_pitch_sum)   
		#timbre arrays
	        seg_timbre = hdf5_getters.get_segments_timbre(h5)
                transpose_timbre = seg_timbre.transpose() #transposing matrix, to have 12 rows
		row.append(transpose_timbre[0][i])
  		seg_timbre_avg= get_avg(transpose_timbre[0])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[0])	
		row.append(seg_timbre_max)
		seg_timbre_min = get_min(transpose_timbre[0])
		row.append(seg_timbre_min)
		seg_timbre_stddev=get_stddev(transpose_timbre[0])
		row.append(seg_timbre_stddev)
		seg_timbre_count = get_count(transpose_timbre[0])
		row.append(seg_timbre_count)
		seg_timbre_sum = get_sum(transpose_timbre[0])
		row.append(seg_timbre_sum)   
 		row.append(transpose_timbre[1][i])
 		seg_timbre_avg= get_avg(transpose_timbre[1])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[1])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[1])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[1])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[1])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[1])
		row.append(seg_timbre_sum)   
		row.append(transpose_timbre[2][i])
 		seg_timbre_avg= get_avg(transpose_timbre[2])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[2])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[2])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[2])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[2])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[2])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[3][i])
 		seg_timbre_avg= get_avg(transpose_timbre[3])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[3])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[3])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[3])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[3])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[3])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[4][i])
 		seg_timbre_avg= get_avg(transpose_timbre[4])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[4])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[4])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[4])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[4])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[4])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[5][i])
 		seg_timbre_avg= get_avg(transpose_timbre[5])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[5])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[5])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[5])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[5])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[5])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[6][i])
 		seg_timbre_avg= get_avg(transpose_timbre[6])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[6])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[6])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[6])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[6])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[6])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[7][i])
 		seg_timbre_avg= get_avg(transpose_timbre[7])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[7])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[7])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[7])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[7])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[7])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[8][i])
 		seg_timbre_avg= get_avg(transpose_timbre[8])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[8])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[8])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[8])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[8])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[8])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[9][i])
 		seg_timbre_avg= get_avg(transpose_timbre[9])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[9])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[9])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[9])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[9])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[9])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[10][i])
 		seg_timbre_avg= get_avg(transpose_timbre[10])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[10])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[10])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[10])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[10])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[10])
		row.append(seg_timbre_sum)   
		
		row.append(transpose_timbre[11][i])
 		seg_timbre_avg= get_avg(transpose_timbre[11])
		row.append(seg_timbre_avg)
		seg_timbre_max= get_max(transpose_timbre[11])	
		row.append(seg_timbre_max)
	        seg_timbre_min = get_min(transpose_timbre[11])
		row.append(seg_timbre_min)
	        seg_timbre_stddev= get_stddev(transpose_timbre[11])
		row.append(seg_timbre_stddev)
	        seg_timbre_count = get_count(transpose_timbre[11])
		row.append(seg_timbre_count)
	        seg_timbre_sum = get_sum(transpose_timbre[11])
		row.append(seg_timbre_sum)
	        for item in row_seg2_padding:
			row.append(item)
		writer.writerow(row)
		row=[]
		row=gral_info[:]


            # "--------tatums---------------"
	    tatms_c = hdf5_getters.get_tatums_confidence(h5)
	    group_index=5
	    row_front=padding(245)	#blank spaces left in front of tatums
	    row=[]
	    row=gral_info[:]
	    for i,item in enumerate(tatms_c):
		row.append(group_index)
		row.append(i)
		for item in row_front:	#appending blank spaces at the front of the row
			row.append(item)

		row.append(tatms_c[i])
		tatms_c_avg= get_avg(tatms_c)
		row.append(tatms_c_avg)
	 	tatms_c_max= get_max(tatms_c)
		row.append(tatms_c_max)
	        tatms_c_min = get_min(tatms_c)
		row.append(tatms_c_min)
	        tatms_c_stddev= get_stddev(tatms_c)
		row.append(tatms_c_stddev)
                tatms_c_count = get_count(tatms_c)
		row.append(tatms_c_count)
                tatms_c_sum = get_sum(tatms_c)
		row.append(tatms_c_sum)
                tatms_start = hdf5_getters.get_tatums_start(h5)
		row.append(tatms_start[i])
	        tatms_start_avg= get_avg(tatms_start)
		row.append(tatms_start_avg)
	        tatms_start_max= get_max(tatms_start)
		row.append(tatms_start_max)
	        tatms_start_min = get_min(tatms_start)
		row.append(tatms_start_min)
	        tatms_start_stddev= get_stddev(tatms_start)
		row.append(tatms_start_stddev)
	        tatms_start_count = get_count(tatms_start)
		row.append(tatms_start_count)
	        tatms_start_sum = get_sum(tatms_start)				   
		row.append(tatms_start_sum)
		writer.writerow(row)
		row=[]
		row=gral_info[:]


 
	    transpose_pitch = seg_pitch.transpose() #this is to transpose the matrix, so we can have 12 rows
	    #arrays containing the aggregate values of the 12 rows
	    seg_pitch_avg=[]
	    seg_pitch_max=[]
	    seg_pitch_min=[]
            seg_pitch_stddev=[]
            seg_pitch_count=[]
	    seg_pitch_sum=[]
            i=0
	    #Getting the aggregate values in the pitches array
	    for row in transpose_pitch:
		   seg_pitch_avg.append(get_avg(row))
		   seg_pitch_max.append(get_max(row))
	           seg_pitch_min.append(get_min(row))
		   seg_pitch_stddev.append(get_stddev(row))
		   seg_pitch_count.append(get_count(row))
                   seg_pitch_sum.append(get_sum(row))
		   i=i+1

	    #extracting information from the timbre array 
            transpose_timbre = seg_timbre.transpose() #transposing matrix, to have 12 rows
	    #arrays containing the aggregate values of the 12 rows
	    seg_timbre_avg=[]
	    seg_timbre_max=[]
	    seg_timbre_min=[]
            seg_timbre_stddev=[]
            seg_timbre_count=[]
	    seg_timbre_sum=[]
            i=0
	    for row in transpose_timbre:
		   seg_timbre_avg.append(get_avg(row))
		   seg_timbre_max.append(get_max(row))
	           seg_timbre_min.append(get_min(row))
		   seg_timbre_stddev.append(get_stddev(row))
		   seg_timbre_count.append(get_count(row))
                   seg_timbre_sum.append(get_sum(row))
		   i=i+1








	    h5.close()
	    count = count + 1
	    print count
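
A usage sketch for the function above: it walks every .h5 file under the given directory and writes complete.csv into the working directory (the path is illustrative, and hdf5_getters, genre_dict and the helper functions must be importable):

data_to_flat_file('./MillionSongSubset/data')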
Exemple #44
0
def getInfo(files):
    data = []
    build_str = ''
    with open(sys.argv[1], 'r') as f:
        contents = f.read()
        c = contents.split()
    f.close()
    print("creating csv with following fields:" + contents)
    for i in c:
        build_str = build_str + i + ','
    build_str = build_str[:-1]
    build_str = build_str + '\n'
    for fil in files:
        curFile = getters.open_h5_file_read(fil)
        d2 = {}
        get_table = {
            'track_id': getters.get_track_id(curFile),
            'segments_pitches': getters.get_segments_pitches(curFile),
            'time_signature_confidence': getters.get_time_signature_confidence(curFile),
            'song_hotttnesss': getters.get_song_hotttnesss(curFile),
            'artist_longitude': getters.get_artist_longitude(curFile),
            'tatums_confidence': getters.get_tatums_confidence(curFile),
            'num_songs': getters.get_num_songs(curFile),
            'duration': getters.get_duration(curFile),
            'start_of_fade_out': getters.get_start_of_fade_out(curFile),
            'artist_name': getters.get_artist_name(curFile),
            'similar_artists': getters.get_similar_artists(curFile),
            'artist_mbtags': getters.get_artist_mbtags(curFile),
            'artist_terms_freq': getters.get_artist_terms_freq(curFile),
            'release': getters.get_release(curFile),
            'song_id': getters.get_song_id(curFile),
            'track_7digitalid': getters.get_track_7digitalid(curFile),
            'title': getters.get_title(curFile),
            'artist_latitude': getters.get_artist_latitude(curFile),
            'energy': getters.get_energy(curFile),
            'key': getters.get_key(curFile),
            'release_7digitalid': getters.get_release_7digitalid(curFile),
            'artist_mbid': getters.get_artist_mbid(curFile),
            'segments_confidence': getters.get_segments_confidence(curFile),
            'artist_hotttnesss': getters.get_artist_hotttnesss(curFile),
            'time_signature': getters.get_time_signature(curFile),
            'segments_loudness_max_time': getters.get_segments_loudness_max_time(curFile),
            'mode': getters.get_mode(curFile),
            'segments_loudness_start': getters.get_segments_loudness_start(curFile),
            'tempo': getters.get_tempo(curFile),
            'key_confidence': getters.get_key_confidence(curFile),
            'analysis_sample_rate': getters.get_analysis_sample_rate(curFile),
            'bars_confidence': getters.get_bars_confidence(curFile),
            'artist_playmeid': getters.get_artist_playmeid(curFile),
            'artist_terms_weight': getters.get_artist_terms_weight(curFile),
            'segments_start': getters.get_segments_start(curFile),
            'artist_location': getters.get_artist_location(curFile),
            'loudness': getters.get_loudness(curFile),
            'year': getters.get_year(curFile),
            'artist_7digitalid': getters.get_artist_7digitalid(curFile),
            'audio_md5': getters.get_audio_md5(curFile),
            'segments_timbre': getters.get_segments_timbre(curFile),
            'mode_confidence': getters.get_mode_confidence(curFile),
            'end_of_fade_in': getters.get_end_of_fade_in(curFile),
            'danceability': getters.get_danceability(curFile),
            'artist_familiarity': getters.get_artist_familiarity(curFile),
            'artist_mbtags_count': getters.get_artist_mbtags_count(curFile),
            'tatums_start': getters.get_tatums_start(curFile),
            'artist_id': getters.get_artist_id(curFile),
            'segments_loudness_max': getters.get_segments_loudness_max(curFile),
            'bars_start': getters.get_bars_start(curFile),
            'beats_start': getters.get_beats_start(curFile),
            'artist_terms': getters.get_artist_terms(curFile),
            'sections_start': getters.get_sections_start(curFile),
            'beats_confidence': getters.get_beats_confidence(curFile),
            'sections_confidence': getters.get_sections_confidence(curFile)}
        tid = fil.split('/')[-1].split('.')[0]
        # print(c)
        for i in c:
            if i in get_table: 
               d2[i] = get_table[i]
               d2[i] = str(d2[i]).replace('\n','')  
               build_str = build_str + d2[i] + ','
            else:
                print('error: unspecified field')
                exit(0)
        build_str = build_str[:-1]
        # print(build_str[:-1])
        build_str = build_str + '\n'
        curFile.close()
    build_str = build_str.replace('b','').replace("'",'').replace('"','')  
    return (build_str)
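
A usage sketch for getInfo, under the assumptions visible in the function: getters is hdf5_getters, sys.argv[1] names a whitespace-separated list of the fields to extract, and the returned string is the CSV text (header row plus one row per file). The glob pattern and output file name are illustrative:

import glob
import os

files = glob.glob(os.path.join(msd_subset_data_path, '*', '*', '*', '*.h5'))  # msd_subset_data_path as used earlier
csv_text = getInfo(files)
with open('subset_fields.csv', 'w') as out:
    out.write(csv_text)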