Example #1
0
def test_spectrogram_db_magnituds_should_be_in_proper_range():
    """A dB-scaled power spectrogram should only contain values in [-120, 0]."""
    frame_size = 4096
    hop_size = 4096
    audio_path = os.path.join(DATA_DIR, 'she_brings_to_me.wav')
    frames = SignalFrames(audio_path, frame_size, hop_size, mono_mix=True)
    window = create_window(frame_size)
    spectrogram = stft_spectrogram(frames.frames, window, magnitudes='power_db')
    # Lower bound: the dB conversion is expected to clip at -120 dB.
    assert np.all(spectrogram >= -120), \
        'min value: %f should be >= -120' % spectrogram.min()
    # Upper bound: power magnitudes normalized so that 0 dB is the maximum.
    assert np.all(spectrogram <= 0), \
        'max value: %f should be <= 0' % spectrogram.max()
def prepare_chomagram_and_labels(album, song_title, block_size, hop_size,
                                 bin_range, bin_division):
    """Compute a block chromagram for one Beatles song and align its chord
    labels (pitch-class sets) to the audio blocks.

    Side effects: writes a tab-separated block-label ``.pcs`` file and a
    compressed ``.npz`` chromagram file under ``../data/beatles``.

    Parameters
    ----------
    album, song_title : str
        Path components under ``The_Beatles/`` used to locate the ``.wav``
        audio and the ``.lab.pcs.tsv`` chord annotation files.
    block_size, hop_size : int
        Block length and hop (in samples) for splitting the audio.
    bin_range : sequence of two ints
        ``(low, high)`` bin range forwarded to ``chromagram``.
    bin_division : int
        Bin subdivision factor forwarded to ``chromagram``.
    """
    song = 'The_Beatles/' + album + '/' + song_title
    data_dir = '../data/beatles'
    audio_file = data_dir + '/audio-cd/' + song + '.wav'
    chord_file = data_dir + '/chordlab/' + song + '.lab.pcs.tsv'
    # (removed a dead no-op expression `audio_file, chord_file` -- a leftover
    # notebook cell that merely displayed the two paths)

    # ## Load audio
    print('loading audio:', audio_file)

    x, fs = load_wav(audio_file)

    print('sampling rate:', fs, 'Hz')
    print('number of samples:', len(x))
    print('duration in audio:', len(x) / fs, 'sec')

    # ## Load chords
    print('loading chords:', chord_file)
    chords = pd.read_csv(chord_file, sep='\t')
    print('shape:', chords.shape)
    print('duration in chords:', chords['end'].iloc[-1])

    # Pitch-class columns (12 semitones) present in the chord annotation file.
    pcs_cols = [
        'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B'
    ]
    label_cols = ['label', 'root', 'bass'] + pcs_cols

    # ## Split audio to blocks

    x_blocks, x_times = split_to_blocks(x, block_size, hop_size, fs)
    print('blocks shape:', x_blocks.shape)
    print('number of blocks:', len(x_blocks))
    # start times for each block
    print('last block starts at:', x_times[-1], 'sec')

    # ## Mapping of chords to blocks

    def chords_to_blocks(chords, block_center_times):
        # BUG FIX: the original yielded `chords.iloc[i]` with `i` undefined
        # (NameError if ever called) and never advanced `chord_ix`.
        # Now walk forward through the chord segments by their end time.
        # NOTE(review): this generator is not called anywhere in this file --
        # confirm the intended alignment semantics against a caller.
        chord_ix = 0
        for t in block_center_times:
            while (chord_ix < len(chords) - 1
                   and chords['end'].iloc[chord_ix] <= t):
                chord_ix += 1
            yield chords.iloc[chord_ix][pcs_cols]

    def time_to_samples(time):
        # Convert a time in seconds to a (float) sample index.
        return np.round(time * fs)

    chords['start_sample'] = time_to_samples(chords['start'])
    chords['end_sample'] = time_to_samples(chords['end'])
    df_blocks = pd.DataFrame(
        {'start': time_to_samples(x_times).astype(np.int64)})
    df_blocks['end'] = df_blocks['start'] + block_size

    # One row of pitch-class data per unique chord label.
    label_dict = chords[label_cols].drop_duplicates().set_index('label')

    df_labels = chords[['start_sample', 'end_sample', 'label']].copy()
    df_labels.rename(columns={
        'start_sample': 'start',
        'end_sample': 'end'
    },
                     inplace=True)

    # Assign a chord label to each block (project helper).
    df_labelled_blocks = block_labels(df_blocks, df_labels)

    # Expand each block's label into its 12 pitch-class columns.
    df_block_pcs = df_labelled_blocks[['label']].join(
        label_dict, on='label')[['label'] + pcs_cols]

    assert len(df_block_pcs) == len(df_blocks)

    block_labels_file = '{}/chord-pcs/{}_{}/{}.pcs'.format(
        data_dir, block_size, hop_size, song)
    print('block labels file:', block_labels_file)

    os.makedirs(os.path.dirname(block_labels_file), exist_ok=True)
    df_block_pcs.to_csv(block_labels_file, sep='\t', index=False)

    # ## Chromagram features

    w = create_window(block_size)
    X_chromagram = chromagram(x_blocks,
                              w,
                              fs,
                              to_log=True,
                              bin_range=bin_range,
                              bin_division=bin_division)

    chromagram_file = '{}/chromagram/block={}_hop={}_bins={},{}_div={}/{}.npz'.format(
        data_dir, block_size, hop_size, bin_range[0], bin_range[1],
        bin_division, song)

    print('chomagram file:', chromagram_file)

    os.makedirs(os.path.dirname(chromagram_file), exist_ok=True)
    np.savez_compressed(chromagram_file, X=X_chromagram, times=x_times)
# features = data['X']
# times = data['times']

### Chord labels

# df_labels = pd.read_csv(labels_file, sep='\t')
# labels_pcs = df_labels[df_labels.columns[1:]].as_matrix()

block_size = 4096
hop_size = 2048

# NOTE(review): `audio_file` is not defined in this script fragment --
# it must be supplied by earlier context (e.g. a previous notebook cell).
print('loading audio:', audio_file)
x, fs = load_wav(audio_file)
print('splitting audio to blocks')
# NOTE(review): unlike the call inside prepare_chomagram_and_labels, `fs`
# is not passed here -- confirm split_to_blocks has a matching default.
x_blocks, times = split_to_blocks(x, block_size, hop_size)
w = create_window(block_size)
print('computing chromagram')
X_chromagram = chromagram(x_blocks, w, fs, to_log=True)
features = X_chromagram

## Data preprocessing

### Features

print('scaling the input features')
# scaler = MinMaxScaler()
# X = scaler.fit_transform(features).astype('float32')
# BUG FIX: applied the original TODO ("should be + 120 on both places") --
# both `- 120` terms are now `+ 120`, shifting the dB-valued features
# before normalizing.
X = (features.astype('float32') + 120) / (features.shape[1] + 120)

# reshape for 1D convolution

### Chord labels

# df_labels = pd.read_csv(labels_file, sep='\t')
# labels_pcs = df_labels[df_labels.columns[1:]].as_matrix()

block_size = 4096
hop_size = 2048

print('loading audio:', audio_file)
print('splitting audio to blocks')
signal_frames = SignalFrames(audio_file, frame_size=block_size, hop_size=hop_size)
x_blocks, x_times, fs = signal_frames.frames, signal_frames.start_times, signal_frames.sample_rate
w = create_window(block_size)
print('computing chromagram')
X_chromagram = chromagram(x_blocks, w, fs, to_log=True)
features = X_chromagram

## Data preprocessing

### Features

print('scaling the input features')
# scaler = MinMaxScaler()
# X = scaler.fit_transform(features).astype('float32')
# TODO: there's a bug: should be + 120 on both places!!!
X = (features.astype('float32') - 120) / (features.shape[1] - 120)

# reshape for 1D convolution
Example #5
0
 def assert_ok(size):
     """A window of the given size should have energy == its length and unit mean power."""
     window = create_window(size)
     assert np.allclose(energy(window), len(window))
     assert np.allclose(mean_power(window), 1.0)