def mim(data):
    # data: list of datasets, each a 2-D array with the class label in the last column
    rank = []
    for i in range(6):
        X = data[i][:, :-1]
        Y = data[i][:, -1]
        F, _, _ = MIM.mim(X, Y)
        idx = samp(F[:-1].tolist())   # samp and rankaggregate are project helpers not shown in this snippet
        rank.append(idx)
    R = rankaggregate(rank)
    return R
def feature_mutual_info_maximisation(x_data, y_data):
    features_scores = MIM.mim(x_data.values, y_data.values, n_selected_features=20)
    features_index = [int(index[0]) for index in features_scores]
    feat_list = x_data.columns.values[features_index]
    feat_list_with_imp = [(feat_list[i], features_scores[i][1]) for i in range(len(features_scores))]
    # dfscores = pd.DataFrame(features_scores)
    # dfcolumns = pd.DataFrame(x_data.columns)
    # featureScores = pd.concat([dfcolumns, dfscores], axis=1)
    featureScores = pd.DataFrame(feat_list_with_imp)
    featureScores.columns = ['Specs', 'Score']  # naming the dataframe columns
    top_20_features = featureScores.nlargest(20, 'Score')
    return top_20_features
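# Illustrative usage only: the CSV path and the 'label' column are placeholders, and the
# MIM module imported by the snippet above must already be in scope.
import pandas as pd

df = pd.read_csv('some_dataset.csv')       # hypothetical input file
x_data = df.drop(columns=['label'])        # feature columns as a DataFrame
y_data = df['label']                       # class labels as a Series
top_20 = feature_mutual_info_maximisation(x_data, y_data)
print(top_20)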
def run_feature_selection(X, Y, n_selected_features):
    lst = []
    if PARALLEL:
        # with multiprocessing.Pool(processes=4) as pool:
        #     lst.append(pool.apply(JMI.jmi, args=(X, Y), kwds={'n_selected_features': n_selected_features}))
        #     lst.append(pool.apply(MIM.mim, args=(X, Y), kwds={'n_selected_features': n_selected_features}))
        #     lst.append(pool.apply(MRMR.mrmr, args=(X, Y), kwds={'n_selected_features': n_selected_features}))
        #     lst.append(pool.apply(MIFS.mifs, args=(X, Y), kwds={'n_selected_features': n_selected_features}))
        #     lst = [l[FEAT_IDX] for l in lst]
        with ProcessPoolExecutor(max_workers=4) as executor:
            lst.append(executor.submit(JMI.jmi, X, Y, n_selected_features=n_selected_features))
            lst.append(executor.submit(MIM.mim, X, Y, n_selected_features=n_selected_features))
            lst.append(executor.submit(MRMR.mrmr, X, Y, n_selected_features=n_selected_features))
            lst.append(executor.submit(MIFS.mifs, X, Y, n_selected_features=n_selected_features))
            lst = [l.result()[FEAT_IDX] for l in lst]
    else:
        lst.append(JMI.jmi(X, Y, n_selected_features=n_selected_features)[FEAT_IDX])
        lst.append(MIM.mim(X, Y, n_selected_features=n_selected_features)[FEAT_IDX])
        lst.append(MRMR.mrmr(X, Y, n_selected_features=n_selected_features)[FEAT_IDX])
        lst.append(MIFS.mifs(X, Y, n_selected_features=n_selected_features)[FEAT_IDX])
    return lst
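# Illustrative usage sketch. PARALLEL and FEAT_IDX are module-level settings in the
# original script and are redefined here only to keep the sketch self-contained;
# FEAT_IDX = 0 assumes each selector returns the ranked feature indices as the first
# element of its result, as the other snippets in this section do. The skfeature import
# path is an assumption about where JMI/MIM/MRMR/MIFS come from, and the random data is
# a placeholder.
import numpy as np
from skfeature.function.information_theoretical_based import JMI, MIM, MRMR, MIFS

PARALLEL = False
FEAT_IDX = 0

rng = np.random.default_rng(0)
X_demo = rng.integers(0, 3, size=(100, 20)).astype(float)
Y_demo = rng.integers(0, 2, size=100)
selected = run_feature_selection(X_demo, Y_demo, n_selected_features=5)
for name, idx in zip(['JMI', 'MIM', 'MRMR', 'MIFS'], selected):
    print(name, idx)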
def run_fold(trial, P, X, y, method, dataset, parttype):
    print 'Obtaining features for %s %s %s fold: %2d' % (parttype, method, dataset, trial)
    n_samples, n_features = X.shape
    train = P[:, trial] == 1
    trnX = X[train]
    trnY = y[train]
    start_time = time.time()
    if method == 'fisher':
        score = fisher_score.fisher_score(trnX, trnY)
        features = fisher_score.feature_ranking(score)
    elif method == 'chi2':
        score = chi_square.chi_square(trnX, trnY)
        features = chi_square.feature_ranking(score)
    elif method == 'relieff':
        score = reliefF.reliefF(trnX, trnY)
        features = reliefF.feature_ranking(score)
    elif method == 'jmi':
        features = JMI.jmi(trnX, trnY, n_selected_features=n_features)
    elif method == 'mrmr':
        features = MRMR.mrmr(trnX, trnY, n_selected_features=n_features)
    elif method == 'infogain':
        features = MIM.mim(trnX, trnY, n_selected_features=n_features)
    elif method == 'svmrfe':
        features = svmrfe(trnX, trnY)
    elif method == 'hdmr':
        sobol_set_all = scipy.io.loadmat('sobol_set.mat')
        sobol_set = sobol_set_all['sobol_set']
        sobol_set = sobol_set.astype(float)
        params = {'sobol_set': sobol_set, 'k': 1, 'p': 3, 'M': 1000, 'b': 'L'}
        models = hdmrlearn(trnX, trnY, params)
        features, w = hdmrselect(X, models)
    elif method == 'hdmrhaar':
        sobol_set_all = scipy.io.loadmat('sobol_set.mat')
        sobol_set = sobol_set_all['sobol_set']
        sobol_set = sobol_set.astype(float)
        params = {'sobol_set': sobol_set, 'k': 1, 'p': 255, 'M': 1000, 'b': 'H'}
        models = hdmrlearn(trnX, trnY, params)
        features, w = hdmrselect(X, models)
    else:
        print(method + ' does not exist')
    cputime = time.time() - start_time
    print features
    print 'cputime %f' % cputime
    return {'features': features, 'cputime': cputime}
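# Illustrative call (a sketch, not from the original experiments): P is assumed to be an
# (n_samples x n_trials) 0/1 matrix whose column `trial` flags the training rows, as
# implied by `P[:, trial] == 1` above; the random data below is a placeholder.
np.random.seed(0)
X_demo = np.random.randint(0, 3, size=(80, 12)).astype(float)
y_demo = np.random.randint(0, 2, size=80)
P_demo = (np.random.rand(80, 5) < 0.8).astype(int)
out = run_fold(0, P_demo, X_demo, y_demo, 'infogain', 'demo', 'train')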
def main():
    # load data
    mat = scipy.io.loadmat('../data/colon.mat')
    X = mat['X']    # data
    X = X.astype(float)
    y = mat['Y']    # label
    y = y[:, 0]
    n_samples, n_features = X.shape    # number of samples and number of features

    # split data into 10 folds
    ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)

    # perform evaluation on classification task
    num_fea = 10    # number of selected features
    clf = svm.LinearSVC()    # linear SVM

    correct = 0
    for train, test in ss:
        # obtain the index of each feature on the training set
        idx, _, _ = MIM.mim(X[train], y[train], n_selected_features=num_fea)

        # obtain the dataset on the selected features
        features = X[:, idx[0:num_fea]]

        # train a classification model with the selected features on the training dataset
        clf.fit(features[train], y[train])

        # predict the class labels of test data
        y_predict = clf.predict(features[test])

        # obtain the classification accuracy on the test data
        acc = accuracy_score(y[test], y_predict)
        correct = correct + acc

    # output the average classification accuracy over all 10 folds
    print 'Accuracy:', float(correct)/10
def main():
    # load data
    mat = scipy.io.loadmat('../data/colon.mat')
    X = mat['X']    # data
    X = X.astype(float)
    y = mat['Y']    # label
    y = y[:, 0]
    n_samples, n_features = X.shape    # number of samples and number of features

    # split data into 10 folds
    ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)

    # perform evaluation on classification task
    num_fea = 10    # number of selected features
    clf = svm.LinearSVC()    # linear SVM

    correct = 0
    for train, test in ss:
        # obtain the index of each feature on the training set
        idx = MIM.mim(X[train], y[train], n_selected_features=num_fea)

        # obtain the dataset on the selected features
        features = X[:, idx[0:num_fea]]

        # train a classification model with the selected features on the training dataset
        clf.fit(features[train], y[train])

        # predict the class labels of test data
        y_predict = clf.predict(features[test])

        # obtain the classification accuracy on the test data
        acc = accuracy_score(y[test], y_predict)
        correct = correct + acc

    # output the average classification accuracy over all 10 folds
    print('Accuracy:', old_div(float(correct), 10))
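# The two main() variants above use sklearn.cross_validation, which was removed in
# scikit-learn 0.20. A minimal sketch of the same 10-fold experiment against the modern
# model_selection API follows; the colon.mat layout and skfeature's MIM module are taken
# from the example above, and the unpacking of MIM.mim's return value assumes the
# (ranking, J_CMI, MIfy) tuple form used in the first variant.
import scipy.io
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from skfeature.function.information_theoretical_based import MIM

mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'].astype(float)
y = mat['Y'][:, 0]
num_fea = 10                  # number of selected features
clf = svm.LinearSVC()         # linear SVM

acc_sum = 0.0
for train, test in KFold(n_splits=10, shuffle=True).split(X):
    idx, _, _ = MIM.mim(X[train], y[train], n_selected_features=num_fea)
    features = X[:, idx[0:num_fea]]
    clf.fit(features[train], y[train])
    acc_sum += accuracy_score(y[test], clf.predict(features[test]))
print('Accuracy:', acc_sum / 10)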
def experiment(data, box, cv, output):
    """
    Write the results of an experiment.

    This function will run an experiment for a specific dataset for a bounding box. There
    will be CV runs of randomized experiments and the outputs will be written to a file.

    Parameters
    ----------
    data : string
        Dataset name.
    box : string
        Bounding box on the file name.
    cv : int
        Number of cross validation runs.
    output : string
        Path of the output ``.npz`` file where the accuracy results are written.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If the percent poison exceeds the number of samples in the requested data.
    """
    # data, box, cv, output = 'conn-bench-sonar-mines-rocks', '1', 5, 'results/test.npz'

    # load normal and adversarial data
    path_adversarial_data = 'data/attacks/' + data + '_[xiao][' + box + '].csv'
    df_normal = pd.read_csv('data/clean/' + data + '.csv', header=None).values
    df_adversarial = pd.read_csv(path_adversarial_data, header=None).values

    # separate out the normal and adversarial data
    Xn, yn = df_normal[:, :-1], df_normal[:, -1]
    Xa, ya = df_adversarial[:, :-1], df_adversarial[:, -1]

    # change the labels from +/-1 to [0,1]
    ya[ya == -1], yn[yn == -1] = 0, 0

    # calculate the ratios of data that would be used for training and hold out
    p0, p1 = 1./cv, (1. - 1./cv)
    N = len(Xn)

    # calculate the total number of training and testing samples and set the number of
    # features that are going to be selected
    Ntr, Nte = int(p1*N), int(p0*N)
    # [OBS]: Losing one feature in the process
    n_selected_features = int(Xn.shape[1]*SEL_PERCENT) + 1

    # zero the results out
    acc_KNN = np.zeros((NPR, 6))

    ####################################
    # CLASSIFICATION
    ####################################
    # run `cv` randomized experiments. note this is not performing cross-validation, rather
    # we are going to use randomized splits of the data.
    for _ in range(cv):
        # shuffle up the data for the experiment then split the data into a training and
        # testing dataset
        i = np.random.permutation(N)
        Xtrk, ytrk, Xtek, ytek = Xn[i][:Ntr], yn[i][:Ntr], Xn[i][-Nte:], yn[i][-Nte:]

        ####### Classification on Normal Data with no FS #######################
        yn_allfeature_KNN = KNN_classification(Xtrk, ytrk, Xtek, ytek)

        ####### Classification on JMI-based features on Normal data #############
        sf_base_jmi = JMI.jmi(Xtrk, ytrk, n_selected_features=n_selected_features)[FEAT_IDX]
        # print("\nNOR: JMI features", sf_base_jmi)
        Xtr_jmi = Xtrk[:, sf_base_jmi]
        Xte_jmi = Xtek[:, sf_base_jmi]
        yn_JMI_KNN = KNN_classification(Xtr_jmi, ytrk, Xte_jmi, ytek)

        for n in range(NPR):
            # calculate the number of poisoned data samples that we are going to need to make
            # sure that the poisoning ratio is correct in the training data. e.g., if you have
            # N=100 samples and you want to poison by 20% then the 20% needs to be from
            # the training size. hence it is not 20.
            Np = int(len(ytrk)*POI_RNG[n] + 1)
            if Np >= len(ya):
                # shouldn't happen, but catch the case where we are requesting more poison
                # data samples than are available. NEED TO BE CAREFUL WHEN WE ARE CREATING
                # THE ADVERSARIAL DATA
                raise ValueError('Number of poison data requested is larger than the available data.')
            # find the number of normal (i.e., not poisoned) samples in the training data,
            # then create the randomized data set that has Nn normal samples and Np
            # adversarial samples in the training data
            Nn = len(ytrk) - Np
            idx_normal, idx_adversarial = np.random.permutation(len(ytrk))[:Nn], \
                                          np.random.permutation(len(ya))[:Np]
            Xtrk_poisoned, ytrk_poisoned = np.concatenate((Xtrk[idx_normal], Xa[idx_adversarial])), \
                                           np.concatenate((ytrk[idx_normal], ya[idx_adversarial]))

            # KNN classification on the poisoned training data with no feature selection
            ya_allfeature_KNN = KNN_classification(Xtrk_poisoned, ytrk_poisoned, Xtek, ytek)

            # run feature selection with the training data that has adversarial samples
            sf_adv_jmi = JMI.jmi(Xtrk_poisoned, ytrk_poisoned, n_selected_features=n_selected_features)[FEAT_IDX]
            sf_adv_mim = MIM.mim(Xtrk_poisoned, ytrk_poisoned, n_selected_features=n_selected_features)[FEAT_IDX]
            sf_adv_mrmr = MRMR.mrmr(Xtrk_poisoned, ytrk_poisoned, n_selected_features=n_selected_features)[FEAT_IDX]
            sf_adv_misf = MIFS.mifs(Xtrk_poisoned, ytrk_poisoned, n_selected_features=n_selected_features)[FEAT_IDX]

            # KNN Classification on JMI selected features
            Xtrk_poisoned_JMI = Xtrk_poisoned[:, sf_adv_jmi]
            Xtest_JMI = Xtek[:, sf_adv_jmi]
            ya_JMI_KNN = KNN_classification(Xtrk_poisoned_JMI, ytrk_poisoned, Xtest_JMI, ytek)

            # KNN Classification on MIM selected features
            Xtrk_poisoned_MIM = Xtrk_poisoned[:, sf_adv_mim]
            Xtest_MIM = Xtek[:, sf_adv_mim]
            ya_MIM_KNN = KNN_classification(Xtrk_poisoned_MIM, ytrk_poisoned, Xtest_MIM, ytek)

            # KNN Classification on MRMR selected features
            Xtrk_poisoned_MRMR = Xtrk_poisoned[:, sf_adv_mrmr]
            Xtest_MRMR = Xtek[:, sf_adv_mrmr]
            ya_MRMR_KNN = KNN_classification(Xtrk_poisoned_MRMR, ytrk_poisoned, Xtest_MRMR, ytek)

            # KNN Classification on MISF selected features
            Xtrk_poisoned_MISF = Xtrk_poisoned[:, sf_adv_misf]
            Xtest_MISF = Xtek[:, sf_adv_misf]
            ya_MISF_KNN = KNN_classification(Xtrk_poisoned_MISF, ytrk_poisoned, Xtest_MISF, ytek)

            # Calculate accumulated accuracy in a matrix of size 9x6
            acc_KNN[n, 0] += accuracy_score(ytek, yn_allfeature_KNN)   # Acc score of normal data without Feature Selection
            acc_KNN[n, 1] += accuracy_score(ytek, ya_allfeature_KNN)   # Acc score of adversarial data without Feature Selection
            acc_KNN[n, 2] += accuracy_score(ytek, ya_JMI_KNN)          # Acc score of adversarial data with JMI Feature Selection
            acc_KNN[n, 3] += accuracy_score(ytek, ya_MIM_KNN)          # Acc score of adversarial data with MIM Feature Selection
            acc_KNN[n, 4] += accuracy_score(ytek, ya_MRMR_KNN)         # Acc score of adversarial data with MRMR Feature Selection
            acc_KNN[n, 5] += accuracy_score(ytek, ya_MISF_KNN)         # Acc score of adversarial data with MISF Feature Selection
            # print(acc_KNN)

    # scale the accuracy statistics by 1.0/cv then write the output file
    acc_KNN = acc_KNN/cv
    print("\n Accuracy matrix of KNN")
    print("[COL]: Norm_noFS, Adv_noFS, Adv_JMI, Adv_MIM, Adv_MRMR, Adv_MISF")
    print("[ROW]: Poisoning ratios: 0.01, 0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2")
    print("\n", acc_KNN)
    np.savez(output, acc_KNN=acc_KNN)

    return None
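# Illustrative driver for experiment(), mirroring the commented-out example at the top of
# the function ('conn-bench-sonar-mines-rocks', box '1', 5 runs, 'results/test.npz').
# SEL_PERCENT, NPR, POI_RNG, FEAT_IDX and KNN_classification are module-level definitions
# in the original script and must already be in scope for this to run.
if __name__ == '__main__':
    experiment('conn-bench-sonar-mines-rocks', '1', 5, 'results/test.npz')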
print("Average Precision: " + repr(average_precision_score(Y_test, Y_pred)))
print("Kappa: " + repr(cohen_kappa_score(Y_test, Y_pred)))
print("Hamming Loss: " + repr(hamming_loss(Y_test, Y_pred)))
print("AUC: " + repr(roc_auc_score(Y_test, Y_pred)))
print("Sensitivity: " + repr(recall_score(Y_test, Y_pred)))
tn, fp, fn, tp = confusion_matrix(Y_test, Y_pred).ravel()
print("Specificity: " + repr(tn / (tn + fp)))
sheet_test.write(r, c, roc_auc_score(Y_test, Y_pred))
r = r + 1

c = c + 1
r = 0

MV_sel = []
MV_sel.append(('MIM', MIM.mim(X_train, Y_train, n_selected_features=num_fea)))
print('MIM')
MV_sel.append(('MIFS', MIFS.mifs(X_train, Y_train, n_selected_features=num_fea)))
print('MIFS')
MV_sel.append(('MRMR', MRMR.mrmr(X_train, Y_train, n_selected_features=num_fea)))
print('MRMR')
MV_sel.append(('CIFE', CIFE.cife(X_train, Y_train, n_selected_features=num_fea)))
print('CIFE')
MV_sel.append(('JMI', JMI.jmi(X_train, Y_train, n_selected_features=num_fea)))
print('JMI')
MV_sel.append(('CMIM', CMIM.cmim(X_train, Y_train, n_selected_features=num_fea)))
print('CMIM')
result = pymrmr.mRMR(X, 'MIQ', 10)
print(result)


def import_Data():
    Data = pd.read_csv('Disease_Data_BiGram.csv')
    # print(Data.shape)
    X = Data.iloc[:, 0:Data.shape[1] - 2]
    Y = Data['Class']
    Y_ = Data['Subject']
    return X, Y, Y_


FS = {}
X, Y, Y_ = import_Data()
FS['MRMR'] = X.columns[MRMR.mrmr(np.array(X), Y_, n_selected_features=15)[:15]]
FS['JMI'] = X.columns[JMI.jmi(np.array(X), Y_, n_selected_features=15)[:15]]
FS['MIFS'] = X.columns[MIFS.mifs(np.array(X), Y_, n_selected_features=15)[:15]]
FS['MIM'] = X.columns[MIM.mim(np.array(X), Y_, n_selected_features=15)[:15]]
FS = pd.DataFrame(FS)
print(FS)
FS.to_csv('Selected_Features_MultiVar_BiG.csv')
# print(pd.DataFrame(FS))
# model = apply_Model(X,Y_)
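# Side note on the pymrmr call at the top of this snippet (an assumption based on pymrmr's
# documented usage, not on the original script): pymrmr.mRMR expects a pandas DataFrame
# whose first column holds the (discretised) class variable and returns the selected column
# names. A sketch of assembling that input from import_Data() follows; 'class' is an
# illustrative column name.
mrmr_input = pd.concat([Y_.rename('class'), X], axis=1)
top_10 = pymrmr.mRMR(mrmr_input, 'MIQ', 10)
print(top_10)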