Example #1
        filename = stftFilePathList[i][-11:-4]
        savepath = saveFolder + filename
        tmp = loadmat(stftFilePathList[i])
        Y1 = np.ndarray.transpose(tmp['HD'])

        filename2 = stftFilePathList2[i][-11:-4]
        tmp = loadmat(stftFilePathList2[i])
        Y2 = np.ndarray.transpose(tmp['HD'])

        filename3 = stftFilePathList3[i][-11:-4]
        tmp = loadmat(stftFilePathList3[i])
        Y3 = np.ndarray.transpose(tmp['HD'])

        '''
        ==== scale the data
        '''
        # scale each drum's activation column; the two extra return
        # values of scaleData are discarded
        [y1_0, dump, dump] = scaleData(Y1[:, 0])
        [y1_1, dump, dump] = scaleData(Y1[:, 1])
        [y1_2, dump, dump] = scaleData(Y1[:, 2])

        [y2_0, dump, dump] = scaleData(Y2[:, 0])
        [y2_1, dump, dump] = scaleData(Y2[:, 1])
        [y2_2, dump, dump] = scaleData(Y2[:, 2])

        [y3_0, dump, dump] = scaleData(Y3[:, 0])
        [y3_1, dump, dump] = scaleData(Y3[:, 1])
        [y3_2, dump, dump] = scaleData(Y3[:, 2])

        # sum the scaled activation curves of the three sources per drum
        # (hh = hi-hat, bd = bass drum, sd = snare drum)
        hh = np.add(np.add(y1_0, y2_0), y3_0)
        bd = np.add(np.add(y1_1, y2_1), y3_1)
        sd = np.add(np.add(y1_2, y2_2), y3_2)
        allCurves = [hh.flatten(), bd.flatten(), sd.flatten()]  # avoids shadowing built-in all()
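scaleData comes from the surrounding project and is not shown on this page. A minimal sketch of a plausible implementation, assuming per-column min-max normalization that returns the scaled data as a column vector plus the two scaling parameters; the column-vector shape matches the later np.concatenate(..., axis=1) calls. Name and behavior here are assumptions, not the project's actual code:

import numpy as np

def scaleData(x):
    # Hypothetical stand-in for the project's scaleData helper:
    # min-max normalize a 1-D activation curve to [0, 1] and return it
    # as a column vector together with the scaling parameters.
    x = np.asarray(x, dtype=float).reshape(-1, 1)
    xMin, xMax = x.min(), x.max()
    scaled = (x - xMin) / (xMax - xMin) if xMax > xMin else np.zeros_like(x)
    return [scaled, xMin, xMax]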
Example #2
        for i in range(0, 200):  # len(stftFilePathList)
            tmp = loadmat(cqtFilePathList[i])
            X_song = np.ndarray.transpose(tmp['Xcqt'])
            tmp = loadmat(pseudoLabelFilePathList[i])
            Y_song = np.ndarray.transpose(tmp['HD'])
            assert (len(X_song) == len(Y_song)
                    ), 'dimensionality mismatch between CQT and Pseudo-Labels!'
            '''
            ==== Concatenating matrices
            '''
            X = np.concatenate((X, X_song), axis=0)
            Y = np.concatenate((Y, Y_song), axis=0)
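X and Y are assumed to be initialized before this loop (e.g. as the first song's matrices or as empty arrays of matching width); their setup is not part of the excerpt. Note also that np.concatenate inside a loop copies the accumulated array on every iteration; collecting the per-song matrices in a list and concatenating once is equivalent and scales much better. A sketch of that pattern, reusing the names above:

import numpy as np
from scipy.io import loadmat

X_parts, Y_parts = [], []
for i in range(0, 200):
    X_song = loadmat(cqtFilePathList[i])['Xcqt'].T          # same as np.ndarray.transpose(...)
    Y_song = loadmat(pseudoLabelFilePathList[i])['HD'].T
    assert len(X_song) == len(Y_song), 'dimensionality mismatch between CQT and Pseudo-Labels!'
    X_parts.append(X_song)
    Y_parts.append(Y_song)
X = np.concatenate(X_parts, axis=0)  # one copy instead of one per song
Y = np.concatenate(Y_parts, axis=0)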
'''
==== Training
'''
[y_hh_scaled, dump, dump] = scaleData(Y[:, 0])
[y_kd_scaled, dump, dump] = scaleData(Y[:, 1])
[y_sd_scaled, dump, dump] = scaleData(Y[:, 2])
y_all = np.concatenate((y_hh_scaled, y_kd_scaled, y_sd_scaled), axis=1)
print('==== training ====\n')

# ==== feeding CQT and delta CQT
X_diff = np.diff(X, axis=0)  # frame-to-frame differences (one row shorter than X)
finalRow = np.zeros((1, np.size(X, 1)))
X_diff = np.concatenate((X_diff, finalRow), axis=0)  # pad a zero row to restore the frame count
X_all = np.concatenate((X, X_diff), axis=1)  # stack CQT and delta CQT feature-wise

model.fit(X_all,
          y_all,
          epochs=50,
          batch_size=640)
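As a quick sanity check of the delta-feature construction above (sizes here are illustrative), the stacked input keeps the same number of frames as X and doubles the feature columns:

import numpy as np

X = np.random.rand(1000, 84)  # e.g. 1000 frames of an 84-bin CQT
X_diff = np.diff(X, axis=0)  # (999, 84)
X_diff = np.concatenate((X_diff, np.zeros((1, X.shape[1]))), axis=0)  # back to (1000, 84)
X_all = np.concatenate((X, X_diff), axis=1)  # (1000, 168)
assert X_all.shape == (X.shape[0], 2 * X.shape[1])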
Example #3
                # thresNvt_hh = thresNvt(Y[:, 0], thresCurve_hh)
                # thresNvt_bd = thresNvt(Y[:, 1], thresCurve_bd)
                # thresNvt_sd = thresNvt(Y[:, 2], thresCurve_sd)

                # dump, onsetInSec_hh = findPeaks(Y[:, 0], thresCurve_hh, fs, hopSize)
                # dump, onsetInSec_bd = findPeaks(Y[:, 1], thresCurve_bd, fs, hopSize)
                # dump, onsetInSec_sd = findPeaks(Y[:, 2], thresCurve_sd, fs, hopSize)
                #
                # onsetInBinary_hh = onset2BinaryVector(onsetInSec_hh, len(Y[:, 0]), hopSize, fs)
                # onsetInBinary_bd = onset2BinaryVector(onsetInSec_bd, len(Y[:, 1]), hopSize, fs)
                # onsetInBinary_sd = onset2BinaryVector(onsetInSec_sd, len(Y[:, 2]), hopSize, fs)
                '''
                ==== Training
                '''
                [y_hh_scaled, dump, dump] = scaleData(thresCurve_hh)
                [y_kd_scaled, dump, dump] = scaleData(thresCurve_bd)
                [y_sd_scaled, dump, dump] = scaleData(thresCurve_sd)
                y_all = np.concatenate((y_hh_scaled, y_kd_scaled, y_sd_scaled),
                                       axis=1)

                #y_all = np.concatenate((onsetInBinary_hh, onsetInBinary_bd, onsetInBinary_sd), axis=1)

                print('==== training ====\n')
                model.fit(X,
                          y_all,
                          epochs=30,
                          batch_size=640,
                          callbacks=[tbCallBack])
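tbCallBack is not defined in this fragment; it is presumably a Keras TensorBoard callback created earlier in the script, along these lines (the log directory is a placeholder):

from keras.callbacks import TensorBoard

# assumed construction of the callback passed to model.fit above
tbCallBack = TensorBoard(log_dir='./logs', write_graph=True)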
'''
==== Save the trained DNN model
'''
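The saving code itself is cut off in this excerpt. With Keras it would typically be one of the following (file names are placeholders):

# whole model (architecture + weights + optimizer state) in a single HDF5 file
model.save('drum_dnn_model.h5')

# or: architecture as JSON plus a separate weights file
with open('drum_dnn_model.json', 'w') as f:
    f.write(model.to_json())
model.save_weights('drum_dnn_weights.h5')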