# Fragment of the ELM-BVSB-KNN training loop. It assumes numpy is imported as np
# and that iter_data, X_train, Y_train, select_h, hidden_nums, test_data, acc_rem
# and the run counter ii are defined by the enclosing script.
X_iter = iter_data[0].copy()
len_iter = len(X_iter)
i = 1
while len(X_iter) > (len_iter / 2):
    nbr = BvsbUtils.KNNClassifier(X_train, Y_train)  # KNN classifier fitted on the current labeled set
    # iter_y = nbr.predict(X_iter)
    pred = nbr.predict_proba(X_iter)
    iter_y = np.argmax(pred, axis=1)
    classMax = np.max(pred, axis=1)  # maximum class probability (confidence) of each unlabeled sample
    sortIndex = np.argsort(classMax)  # original indices sorted by confidence (ascending)
    iter_index = np.sort(sortIndex[-select_h:])  # indices of the h most confident samples
    sort_h_y = iter_y[iter_index]  # predicted labels of those h samples
    sort_h_data = X_iter[iter_index]
    len_curr_iter = len(sort_h_y)
    bvsbc = BvsbClassifier(X_train, Y_train, sort_h_data, sort_h_y, test_data[0], test_data[1], iterNum=0.1)
    bvsbc.createELM(n_hidden=hidden_nums, activation_func="tanh", alpha=1.0, random_state=0)
    _data_index = bvsbc.fitAndGetUpdateDataIndex(limit=int(0.2 * len_curr_iter))
    if len(_data_index) != 0:
        X_train = np.r_[bvsbc.X_train, sort_h_data[_data_index]]
        Y_train = np.r_[bvsbc.Y_train, sort_h_y[_data_index]]
        X_iter = np.delete(X_iter, iter_index[_data_index], axis=0)
    else:
        print("No data was added to the training set; training finished")
        break
    print(f"Run {ii}, iteration {i}: accuracy = {bvsbc.score(test_data[0], test_data[1])}")
    i += 1
    acc_temp = bvsbc.score(test_data[0], test_data[1])  # accuracy of this iteration
    acc_rem.append(acc_temp)  # keep the accuracy of every iteration

print("*************** ELM-BVSB-KNN weighted algorithm (accuracy over 10 runs) ********************")
for i in acc_rem:
    print(i)
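
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the BVSB
# (best-versus-second-best) margin that BvsbClassifier presumably uses to
# decide which pseudo-labeled samples can be trusted. Assumes `prob` is an
# (n_samples, n_classes) probability matrix such as the predict_proba output
# above; the function name `bvsb_margin` is hypothetical.
import numpy as np

def bvsb_margin(prob: np.ndarray) -> np.ndarray:
    """Difference between the largest and second-largest class probability
    for every sample; larger values mean a more confident prediction."""
    top_two = np.sort(prob, axis=1)[:, -2:]  # second-best and best probability per sample
    return top_two[:, 1] - top_two[:, 0]     # best minus second-best

# Example usage: keep only samples whose BVSB margin exceeds a threshold.
# margin = bvsb_margin(pred)
# confident_idx = np.where(margin > 0.3)[0]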
import time

from sklearn import datasets
from sklearn.preprocessing import StandardScaler

# Project-level imports (module paths depend on the repo layout):
# BvsbClassifier, BvsbUtils, elmUtils

data = datasets.load_digits()
stdc = StandardScaler()  # standardize features (zero mean, unit variance)
label_size = 0.3
data.data = stdc.fit_transform(data.data / 16.0)
train, iter, test = elmUtils.splitDataWithIter(data.data, data.target, label_size, 0.2)
Y_iter = BvsbUtils.KNNClassifierResult(train[0], train[1], iter[0])  # KNN pseudo-labels for the unlabeled pool
print(Y_iter.size)

tic = time.perf_counter_ns()
bvsbc = BvsbClassifier(train[0], train[1], iter[0], Y_iter, test[0], test[1], iterNum=0.1)
bvsbc.createELM(n_hidden=1000, activation_func="sigmoid", alpha=1.0, random_state=0)
bvsbc.X_test = test[0]
bvsbc.Y_test = test[1]
bvsbc.trainELMWithKNNButBvsb()
toc = time.perf_counter_ns()

print(bvsbc.score(test[0], test[1]))
print("ELM-BVSB elapsed time: %d ms" % ((toc - tic) / 1000 / 1000))
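
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): what
# BvsbUtils.KNNClassifierResult presumably does, judging from how it is used
# above -- fit a KNN classifier on the labeled split and predict labels for
# the unlabeled pool. The helper name and n_neighbors value are assumptions.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def knn_classifier_result(X_labeled: np.ndarray, y_labeled: np.ndarray,
                          X_unlabeled: np.ndarray, n_neighbors: int = 5) -> np.ndarray:
    """Return KNN-predicted (pseudo) labels for the unlabeled samples."""
    knn = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_labeled, y_labeled)
    return knn.predict(X_unlabeled)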