# Stop pruning once no candidate rows or columns remain.
if len(unique_row) == 0 or len(unique_col) == 0:
    print("\n Finished! \n")
    break

# Try each candidate pair; prune only while both indices are still available.
for index, row in increase_res.iterrows():
    if row['row'] in unique_row and row['col'] in unique_col and row['row'] not in unique_col:
        hidden_num -= 1
        new_net = reduced_ann_net(old_net, int(row['row']), int(row['col']), hidden_num)
        print("\n======= Net hidden size: {}==========\n".format(hidden_num))

        # Time the evaluation of the reduced network.
        start_time = time.time()
        acc, pred = test_model(new_net, x_test, y_test)
        stop_time = time.time()
        print("Execution time: %s ms" % ((stop_time - start_time) * 1000))
        times.append((stop_time - start_time) * 1000)

        mat = confusion(x_test.size(0), 3, pred, y_test)
        F1_score(mat)

        # Save the new network and evaluate its vector angle.
        nets.append(new_net)
        old_net = new_net
        saveNNParas(new_net, x_test, hidden_num)

        vectors = pd.read_excel('vector_angle.xls', header=None)
        if vectors.empty:
            cnt = 10
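# confusion() and F1_score() are project helpers defined elsewhere; their code is
# not shown in this file. As a rough, hypothetical illustration only (not the
# project's actual implementation), a confusion matrix and a macro-averaged F1
# could be computed like this:
import torch

def confusion_sketch(num_samples, num_classes, pred, target):
    # Tally (true class, predicted class) pairs into a num_classes x num_classes matrix.
    mat = torch.zeros(num_classes, num_classes)
    for i in range(num_samples):
        mat[int(target[i]), int(pred[i])] += 1
    return mat

def f1_score_sketch(mat):
    # Per-class precision and recall from the confusion matrix, combined into macro F1.
    tp = mat.diag()
    precision = tp / mat.sum(dim=0).clamp(min=1)
    recall = tp / mat.sum(dim=1).clamp(min=1)
    f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)
    print("Macro F1: {:.4f}".format(f1.mean().item()))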
# normalized = min_max_norm(raw_data, 14)
# normalized.to_excel('music-features-processed.xlsx')

X_train, Y_train, X_test, Y_test = load_data(
    'music-affect_v1/music-features-processed.xlsx', features_num, label_loc,
    features_selector=selector, spliting_ratio=0.8)

net = Net(features_num, hidden_num, classes_num)
train_model(net, X_train, Y_train, lr=learning_rate, epochs=epochs_num)

start_time = time.time()
accuracy, Y_pred = test_model(net, X_test, Y_test)
print("Execution time: %s ms" % ((time.time() - start_time) * 1000))

# Save relevant parameters for analysis.
if accuracy > 40:
    saveNNParas(net, X_test, hidden_num)
    torch.save(net.state_dict(), 'net_model.pt')
    saveDataset(X_train, Y_train, 'training')
    saveDataset(X_test, Y_test, 'testing')

mat = confusion(X_test.size(0), classes_num, Y_pred, Y_test)
print("Confusion Matrix:")
print(mat)
F1_score(mat)

print("\n========================== END ==================================")
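# min_max_norm() appears only in the commented-out preprocessing step above. A
# minimal, hypothetical sketch of such a helper (the raw spreadsheet's column
# layout is assumed, not confirmed by this script): scale the first
# `features_num` columns to [0, 1] and leave the remaining columns (e.g. the
# label) untouched.
import pandas as pd

def min_max_norm_sketch(raw_data: pd.DataFrame, features_num: int) -> pd.DataFrame:
    out = raw_data.copy()
    feats = out.iloc[:, :features_num]
    normalized = (feats - feats.min()) / (feats.max() - feats.min())
    out.iloc[:, :features_num] = normalized.values
    return out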