# NOTE(review): this chunk was collapsed onto one physical line; the original
# loop nesting/indentation is lost. Re-indented conventionally below — the
# per-fold statements presumably sit inside a k-fold loop and the
# result-saving part after it. TODO: confirm against the full file.
else:
    # Fallback hyper-parameters for the (RBF-kernel) SVM; the matching `if`
    # branch is outside this view.
    svm = SVC(gamma=0.1, C=10)
# Feature standardization: fit the scaler on the training fold only, then
# apply the same transform to the test fold (avoids train/test leakage).
X_train_std = std.fit_transform(X_train)
X_test_std = std.transform(X_test)
# Train the model
svm.fit(X_train_std, Y_train)
# Save the model (one pickle per signal length / fold)
file_path = r'.\model\svm_model_30features\\' + str(
    signal_len) + 's-train_' + str(k + 1) + '-fold.pickle'
with open(file_path, 'wb') as fw:
    pickle.dump(svm, fw)
# Predict on the held-out fold and collect the fold metrics
pred_testY = svm.predict(X_test_std)
Se_test, Sp_test, ber = calc_ber(Y_test, pred_testY)
Se_list.append(Se_test)
Sp_list.append(Sp_test)
ber_list.append(ber)
# Save the experiment results (row 0: Se, row 1: Sp, row 2: BER; one
# column per fold)
result = np.vstack([Se_list, Sp_list, ber_list])
np.save(r'important_data\svm_30features_result\svm_' + str(signal_len) + 's',
        result)
# Read the experiment results back and report the cross-fold means.
# NOTE(review): values are formatted with '%%', so calc_ber presumably
# already returns percentages — verify against its definition.
result = np.load(r'important_data\svm_30features_result\svm_' + str(signal_len) + 's.npy')
print('30features+svm,信号%ds' % (signal_len))
print('mean_Se:%.2f%%' % (np.mean(result[0, :])))
print('mean_Sp:%.2f%%' % (np.mean(result[1, :])))
# NOTE(review): collapsed single-line chunk; it starts inside a
# model.fit(...) call whose opening is outside this view, and it ends
# mid-way through the `important_data` dict literal. Indentation below is a
# best-effort reconstruction — confirm against the full file.
        Y_train_oh, batch_size=256, epochs=40, validation_data=(X_test, Y_test_oh))
# Save the model (one HDF5 file per signal length / fold)
model.save(r'.\model\\' + path + '\\' + str(signal_len) + 's-train_' + str(k + 1) + '-fold.h5py')
# Predict class probabilities, then argmax to hard 0/1 labels
prob_trainY = model.predict(X_train)
prob_testY = model.predict(X_test)
pred_trainY = np.argmax(prob_trainY, axis=1)
pred_testY = np.argmax(prob_testY, axis=1)
# Evaluation metrics per fold (train and test)
Se_tr, Sp_tr, Ber_tr = calc_ber(Y_train, pred_trainY)
Se_te, Sp_te, Ber_te = calc_ber(Y_test, pred_testY)
# NOTE(review): calc_auc is handed argmax'ed hard labels rather than the
# predicted probabilities/scores; AUC is normally computed from scores, so
# this looks suspicious — verify calc_auc's expected second argument.
Auc_tr = calc_auc(Y_train, np.argmax(prob_trainY, axis=1), 1)
Auc_te = calc_auc(Y_test, np.argmax(prob_testY, axis=1), 1)
# Training curves recorded by Keras: loss and AUC for train/validation
train_loss = history.history['loss']
test_loss = history.history['val_loss']
train_auc = history.history['auc']
test_auc = history.history['val_auc']
# Save the important data (dict literal continues beyond this chunk)
important_data = {
    'train_loss': train_loss,
# NOTE(review): collapsed single-line chunk; it starts inside a Dense(...)
# layer call whose opening is outside this view. Indentation below is a
# best-effort reconstruction — confirm against the full file.
# NOTE(review): use_bias='False' is a truthy *string*, so Keras builds the
# layer WITH a bias term (it only checks truthiness). Left as-is because the
# weights loaded below were presumably trained with the same setting;
# changing it to the boolean False would break load_weights. TODO: confirm.
    name='final_output', use_bias='False')(tensor)
model = Model(inputs=input0, outputs=prediction)
# Load the trained model weights for this signal length / fold
model.load_weights('.\model\\' + path + '\\' + str(signal_len) + 's-train_' + str(k + 1) + '-fold.h5py')
# Predict probabilities on the test fold
prob_testY = model.predict(X_test)
# Load important parameters (the tuned decision threshold for this fold)
data_path = r'.\important_data\\' + path + '\\' + str(
    signal_len) + 's-train_' + str(k + 1) + '-fold.pickle'
with open(data_path, 'rb') as f:
    parameter = pickle.load(f)
thr = parameter['threshold']
# Convert probabilities to 0/1 labels using the stored threshold
pred_testY = (prob_testY >= thr).astype('int8')
pred_testY = pred_testY.reshape(-1)
Ber.append(calc_ber(Y_test, pred_testY)[2])
# Indices of the misclassified samples in this fold
error_index = np.argwhere((pred_testY - Y_test) != 0).reshape(-1)
Error.append(error_index)
print(k + 1)
# Mean BER over the folds accumulated so far
print('%.2f' % np.mean(Ber))
# Pool the error indices from all five folds.
# NOTE(review): hard-codes exactly 5 folds — keep in sync with the loop.
error = np.hstack([Error[0], Error[1], Error[2], Error[3], Error[4]])
# Per-rhythm accuracy: for each annotation label, the fraction of its
# samples that were NOT misclassified.
for i in annotation:
    one = np.argwhere(original_label == i).reshape(-1)  # 423 AF index
    # Intersection of the pooled error indices with this rhythm's indices
    c = list(set(error).intersection(set(one)))
    acc = (len(one) - len(c)) / len(one)
    print('%.2f' % acc)