# NOTE(review): collapsed (whitespace-mangled) fragment of an if/elif dispatch over
# classifier name `m`; the opening `if` branch and the enclosing loop header lie
# outside this chunk, so the code is kept byte-identical and only annotated.
# What the visible code does, branch by branch:
#   * (tail of a branch, presumably m == 'MCB' — confirm upstream): fits a
#     RandomForestClassifier(n_estimators=10) pool, wraps it in DESlib's MCB
#     (Multiple Classifier Behaviour) dynamic selector, fits, and predicts.
#   * m == 'DES-MI': same RandomForest pool wrapped in DESlib's DESMI selector.
#   * m == 'One_vs_Rest-SMOTE-XGBoost': SMOTE-oversamples the training split,
#     then fits OneVsRestClassifier(xgboost.XGBClassifier()) on the resampled data.
#     NOTE(review): `sm.fit_sample` is the pre-0.4 imbalanced-learn API, removed in
#     imblearn 0.8 — presumably should be `fit_resample`; confirm the pinned version.
#   * m == 'One_vs_Rest-XGBoost': same OvR-XGBoost but on the raw training split.
# After the chain: records weighted-average metrics for fold `i` via
# `ml_record.measure(...)`, increments the fold counter, and writes results to
# "Result_One_vs_All.txt".
# NOTE(review): `file_wirte` is a typo for `file_write` (harmless — local name only).
pool_classifiers = RandomForestClassifier(n_estimators=10) pool_classifiers.fit(Feature_train, Label_train.ravel()) mcb = MCB(pool_classifiers) mcb.fit(Feature_train, Label_train.ravel()) Label_predict = mcb.predict(Feature_test) elif m == 'DES-MI': pool_classifiers = RandomForestClassifier(n_estimators=10) pool_classifiers.fit(Feature_train, Label_train.ravel()) dmi = DESMI(pool_classifiers) dmi.fit(Feature_train, Label_train.ravel()) Label_predict = dmi.predict(Feature_test) elif m == 'One_vs_Rest-SMOTE-XGBoost': sm = SMOTE() Feature_train_o, Label_train_o = sm.fit_sample(Feature_train, Label_train.ravel()) clf = OneVsRestClassifier(xgboost.XGBClassifier()) clf.fit(Feature_train_o, Label_train_o) Label_predict = clf.predict(Feature_test) elif m == 'One_vs_Rest-XGBoost': clf = OneVsRestClassifier(xgboost.XGBClassifier()) clf.fit(Feature_train, Label_train.ravel()) Label_predict = clf.predict(Feature_test) ml_record.measure(i, Label_test, Label_predict, 'weighted') i += 1 file_wirte = "Result_One_vs_All.txt" ml_record.output(file_wirte, m, Dir)
# NOTE(review): collapsed (whitespace-mangled) fragment of a loop body — the code
# indexes per-fold lists with `k` (SMOTE_feature_train_list[k][0], etc.) and fits
# `pool_classifiers[k]`, so a `for k ...` header presumably precedes this chunk
# (outside this view); code is kept byte-identical and only annotated.
# What the visible code does:
#   * Builds a SMOTE-augmented training set by concatenating the k-th resampled
#     train and validation folds (features and labels separately), then fits the
#     k-th pool member on it.
#   * Dispatches on method name `m` to a DESlib dynamic-selection wrapper around
#     the pool: METADES, MCB, or DESMI ("…-XGBoost" names suggest the pool holds
#     XGBoost models — confirm where `pool_classifiers` is constructed).
#     Each wrapper is fitted on the ORIGINAL (non-SMOTE) Feature_train/Label_train
#     — presumably intentional (DSEL on clean data); verify against the paper/design.
#   * Prints the confusion matrix, records weighted metrics for fold `i`,
#     increments `i`, and writes results tagged 'Tang'.
# NOTE(review): "Result_Esemble_Tang.txt" misspells "Ensemble", and `file_wirte`
# misspells `file_write` — the filename is a runtime string, so it is left
# untouched here; downstream consumers may depend on the existing name.
#print("Data Set Folder: ", file, ", SMOTE folder id: ", str(k)) Feature_train_smote = np.concatenate( (SMOTE_feature_train_list[k][0], SMOTE_feature_valid_list[k][0])) Label_train_smote = np.concatenate( (SMOTE_label_train_list[k][0], SMOTE_label_valid_list[k][0])) pool_classifiers[k].fit(Feature_train_smote, Label_train_smote.ravel()) if m == 'META-DES-XGBoost': metades = METADES(pool_classifiers) metades.fit(Feature_train, Label_train.ravel()) Label_predict = metades.predict(Feature_test) elif m == 'MCB-XGBoost': mcb = MCB(pool_classifiers) mcb.fit(Feature_train, Label_train.ravel()) Label_predict = mcb.predict(Feature_test) elif m == 'DES-MI-XGBoost': dmi = DESMI(pool_classifiers) dmi.fit(Feature_train, Label_train.ravel()) Label_predict = dmi.predict(Feature_test) print(confusion_matrix(Label_test, Label_predict)) ml_record.measure(i, Label_test, Label_predict, 'weighted') i += 1 file_wirte = "Result_Esemble_Tang.txt" ml_record.output(file_wirte, m, 'Tang')