def main():
    dataset_add = '/home/bardia/Documents/University/Seventh Semester/Principles of Computational Intelligence/Digits Dataset/classes/'
    splitRatio = 0.33
    sample_per_class = 20
    radius = 6000  # radius passed to the prediction routine

    # split the per-class samples into train and test sets
    train_set, test_set = splitDataset(dataset_add, sample_per_class, splitRatio)
    predictions = getPredictions(train_set, test_set, radius)
    print('-------------')
    accuracy = getAccuracy(predictions)
    print('Accuracy is: ' + str(accuracy) + '%')
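# The helpers called in main() are defined elsewhere in the project. As a
# hypothetical illustration of the contract assumed here, getAccuracy might
# look like the sketch below; the (predicted, actual) pair structure of
# `predictions` is an assumption, not the project's confirmed format.
def getAccuracy(predictions):
    correct = sum(1 for predicted, actual in predictions if predicted == actual)
    return correct / len(predictions) * 100  # percentage, matching the print above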
def main():
    dataset_add = '/home/bardia/Documents/University/Seventh Semester/Principles of Computational Intelligence/Digits Dataset/classes/'
    splitRatio = 0.33
    sample_per_class = 5

    train_set, test_set = splitDataset(dataset_add, sample_per_class, splitRatio)
    print('train=%d and test=%d' % (len(train_set), len(test_set)))

    # prepare model
    separated_by_class = separateByClass(train_set)
    summaries = summarizeByClass(separated_by_class)

    # test model
    predictions = getPredictions(summaries, test_set)
    accuracy = getAccuracy(predictions)
    print('Accuracy: ' + str(accuracy) + '%')
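# summarizeByClass is not shown in this excerpt. Assuming the classic Gaussian
# naive-Bayes recipe (one (mean, stdev) pair per feature per class), a minimal
# sketch could look like the one below; the row/column layout of the samples
# is an assumption.
import math

def summarizeByClass(separated_by_class):
    summaries = {}
    for class_value, rows in separated_by_class.items():
        # one (mean, stdev) pair per feature column
        summaries[class_value] = [(mean(col), stdev(col)) for col in zip(*rows)]
    return summaries

def mean(values):
    return sum(values) / len(values)

def stdev(values):
    avg = mean(values)
    variance = sum((x - avg) ** 2 for x in values) / (len(values) - 1)
    return math.sqrt(variance)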
import numpy as np
from scipy.io import wavfile

S = norm_float2int(S)
N = norm_float2int(N)

# mix noise and speech; trim to 10 seconds if TOY, to 2 min 30 s otherwise
X, S = dataMix(S, N, fs, mix, TOY=args.TOY)

X_int = norm_float2int(X)
wavfile.write("X_train.wav", fs, X_int)
S_int = norm_float2int(S)
wavfile.write("S_train.wav", fs, S_int)

# slice the 1-D signals into overlapping frames
S_mat = seq2mat(S, Frame_len, shift)
X_mat = seq2mat(X, Frame_len, shift)

# use the same seed so speech and mixture frames stay aligned across splits
S_train, S_dev, S_test = splitDataset(S_mat, seed=1)
X_train, X_dev, X_test = splitDataset(X_mat, seed=1)

# complex spectrograms of the framed signals
FS_train = time2CompSpectro(S_train)
FX_train = time2CompSpectro(X_train)
FS_dev = time2CompSpectro(S_dev)
FX_dev = time2CompSpectro(X_dev)

F2M = freq2mel(n_Mel, fs, FX_train)
# get M2F matrix by transpose
M2F = F2M.T
# normalize M2F matrix; add a very small number to avoid underflow
M2F = M2F / (np.sum(M2F, axis=0, keepdims=True) + 1e-8)

MEL_S_train = np.dot(np.abs(FS_train), F2M)  # output of NN (MEL_S/MEL_X)
MEL_X_train = np.dot(np.abs(FX_train), F2M)  # input of NN (MEL_X)
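# seq2mat is defined elsewhere in the project; below is a minimal sketch of
# the framing it is assumed to perform (Frame_len samples per frame, a hop of
# `shift` samples between frames). The real implementation's padding and
# edge handling are unknown.
def seq2mat(x, Frame_len, shift):
    n_frames = 1 + (len(x) - Frame_len) // shift
    return np.stack([x[i * shift : i * shift + Frame_len] for i in range(n_frames)])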
from collections import defaultdict

import tensorflow as tf
from tensorflow.keras import layers, regularizers
from tqdm import tqdm

end_t = 1500
n_epochs = 1000       # number of complete passes over the dataset during training
patience = 3          # patience for early stopping
l1_reg_const = 0.001  # coefficient of the l1 regularization penalty
l2_reg_const = 0.001  # coefficient of the l2 regularization penalty
######################################

if section == "4.3.1.1":
    _, outputs = generateDataset(start_t, end_t)
    t = [i for i in range(start_t, end_t)]
    plotTimeSeries(t, outputs)

if section == "4.3.1.2":
    inputs, outputs = generateDataset(start_t, end_t)
    x_train, x_val, x_test, y_train, y_val, y_test = splitDataset(inputs, outputs)
    sizes = [8, 16, 32, 64, 128, 256, 512]  # hidden-layer widths of the NNs to be trained
    nn2error = defaultdict(list)  # maps hidden-layer width to its recorded errors
    for size in tqdm(sizes):  # for each model size
        for i in range(5):  # train 5 times for statistical significance
            model = tf.keras.Sequential([
                layers.Dense(size, activation='relu',
                             kernel_regularizer=regularizers.l1_l2(l1_reg_const, l2_reg_const),
                             input_shape=[len(x_train[0])]),
                layers.Dense(1)
            ])
            model.compile(optimizer='adam', loss='mse', metrics=['mse'])
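            # The excerpt stops at compile; a plausible continuation, using
            # Keras's built-in EarlyStopping to honour the `patience` constant
            # above, is sketched here. restore_best_weights and recording the
            # test MSE via evaluate are assumptions, not the original setup.
            early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                          patience=patience,
                                                          restore_best_weights=True)
            model.fit(x_train, y_train,
                      validation_data=(x_val, y_val),
                      epochs=n_epochs,
                      callbacks=[early_stop],
                      verbose=0)
            nn2error[size].append(model.evaluate(x_test, y_test, verbose=0)[0])  # record test MSE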