Code Example #1
 def _set_weights(self):
     """Wang's weights"""
     if self._weighting_method == "proportional_to_mse" and not self._update:
         mse_rand = self._random_mean_squared_error(self.y_)
         mse_members = np.array([
             self._mean_squared_error(self.y_,
                                      member_clf.predict_proba(self.X_))
             for member_clf in self.ensemble_
         ])
         self.weights_ = mse_rand - mse_members
     elif self._weighting_method == "proportional_to_mse" and self._update:
         self.weights_ = np.array([
             1 / (self._mean_squared_error(
                 self.y_, member_clf.predict_proba(self.X_)) + 0.001)
             for member_clf in self.ensemble_
         ])
     elif self._weighting_method == "proportional_to_f1":
         self.weights_ = np.array([
             f1_score(self.y_, member_clf.predict(self.X_))
             for member_clf in self.ensemble_
         ])
     elif self._weighting_method == "proportional_to_g-mean":
         self.weights_ = np.array([
             g_mean(self.y_, member_clf.predict(self.X_))
             for member_clf in self.ensemble_
         ])
     elif self._weighting_method == "proportional_to_bac":
         self.weights_ = np.array([
             bac(self.y_, member_clf.predict(self.X_))
             for member_clf in self.ensemble_
         ])
     else:
         raise NotImplementedError
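The metric helpers referenced above (bac, g_mean, f1_score) are imported elsewhere in the original module and are not shown in the snippet. A minimal sketch of plausible definitions, assuming scikit-learn and imbalanced-learn supply them (the original imports may differ):

# Assumed imports for the metric helpers used in the snippet above.
import numpy as np
from sklearn.metrics import f1_score, balanced_accuracy_score as bac
from imblearn.metrics import geometric_mean_score as g_mean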
Code Example #2
    def balancedCoIncoTrial_Decoder(pe,feats):

        res = pd.DataFrame(np.zeros((2,4)),columns=['Test','BAc','P','Z'])

        # sample correct trials to match the number of incorrect trials.
        samp_co_trials = np.random.choice(TrSets['co'],nInCo,replace=False)

        train = np.concatenate( (TrSets['inco'], samp_co_trials ))
        test = np.setdiff1d(TrSets['co'], samp_co_trials)

        X_train = allZoneFR.loc[train,feats].values
        X_test = allZoneFR.loc[test,feats].values

        Y_cue_train = predVec['Cue'][train]
        Y_desc_train = predVec['Desc'][train]

        Y_test = predVec['Cue'][test] # cue and desc labels are the same on the test set (correct trials only).

        # model trained on the cue
        res.loc[0,'Test'] = 'Cue'
        cue_mod = mod.fit(X_train,Y_cue_train)
        y_cue_hat = cue_mod.predict(X_test)
        res.loc[0,'BAc']  = bac(Y_test,y_cue_hat)*100

        cue_sh = np.zeros(nSh)
        for sh in np.arange(nSh):
            y_perm = np.random.permutation(Y_test)
            cue_sh[sh] = bac(y_perm,y_cue_hat)*100

        res.loc[0,'Z'] = getPerm_Z(cue_sh, res.loc[0,'BAc'] )
        res.loc[0,'P'] = getPerm_Pval(cue_sh, res.loc[0,'BAc'] )

        # model trained on the desc
        res.loc[1,'Test'] = 'Desc'
        desc_mod = mod.fit(X_train,Y_desc_train)
        y_desc_hat = desc_mod.predict(X_test)
        res.loc[1,'BAc']  = bac(Y_test,y_desc_hat)*100

        desc_sh = np.zeros(nSh)
        for sh in np.arange(nSh):
            y_perm = np.random.permutation(Y_test)
            desc_sh[sh] = bac(y_perm,y_desc_hat)*100
        res.loc[1,'Z'] = getPerm_Z(desc_sh, res.loc[1,'BAc'] )
        res.loc[1,'P'] = getPerm_Pval(desc_sh, res.loc[1,'BAc'] )

        return res
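The helpers getPerm_Z and getPerm_Pval are defined elsewhere in the project; below is a hypothetical sketch, assuming they compare the observed balanced accuracy against the distribution of scores obtained from the shuffled labels:

import numpy as np

# Hypothetical versions of the permutation-test helpers used above; the
# project's actual implementations may differ.
def getPerm_Z(null_scores, observed):
    # z-score of the observed score relative to the shuffled (null) scores
    return (observed - np.mean(null_scores)) / np.std(null_scores)

def getPerm_Pval(null_scores, observed):
    # one-sided permutation p-value: fraction of shuffled scores >= observed
    return (np.sum(null_scores >= observed) + 1) / (len(null_scores) + 1)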
Code Example #3
 def _evaluate(individual, y_predicts, y_true, pairwise_div_stat):
     predictions = MCE.get_group(individual, y_predicts)
     if predictions.size > 0:
         y_predict = MCE._majority_voting(predictions)
         qual = bac(y_true, y_predict)
     else:
         qual = 0
     div = MCE.Q_statistic(individual, pairwise_div_stat)
     return qual, div
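MCE.get_group, MCE._majority_voting, and MCE.Q_statistic belong to the surrounding ensemble class and are not shown here. As a rough sketch, majority voting over a prediction matrix of shape (n_members, n_samples) could look like the following (an assumption for illustration, not the actual MCE implementation):

import numpy as np

def majority_voting(predictions):
    # Most frequent class label across ensemble members for each sample;
    # assumes non-negative integer labels of shape (n_members, n_samples).
    predictions = np.asarray(predictions, dtype=int)
    return np.apply_along_axis(lambda col: np.bincount(col).argmax(),
                               axis=0, arr=predictions)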
Code Example #4
    def IncoTrial_Decoder(train,test):

        res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
        temp = mod.fit(X_train[train],y_train[train])

        res.loc[0,'Test'] = 'Model'
        y_hat = temp.predict(X_train[test])
        res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100

        # shuffle predictions for the held-out train split
        mod_sh = np.zeros(nSh)
        for sh in np.arange(nSh):
            y_perm_hat = np.random.permutation(y_hat)
            mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
        res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
        res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )

        # predictions on X_test
        y_hat = temp.predict(X_test)
        res.loc[1,'Test'] = 'Cue'
        res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100

        res.loc[2,'Test'] = 'Desc'
        res.loc[2,'BAc'] = 100-res.loc[1,'BAc']

        # shuffles for y_test cue/desc
        cue_sh = np.zeros(nSh)
        for sh in np.arange(nSh):
            y_perm_hat = np.random.permutation(y_hat)
            cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100

        res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
        res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )

        res.loc[2,'Z'] = getPerm_Z(100-cue_sh, res.loc[2,'BAc'] )
        res.loc[2,'P'] = getPerm_Pval(100-cue_sh, res.loc[2,'BAc'] )

        return res
Code Example #5
    def _get_weigth_for_candidate(self, candidate_clf):

        sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5)
        weight = []
        for train_index, test_index in sss.split(self.X_, self.y_):
            for i in range(2):
                if self._sampling == 'over':
                    ros = RandomOverSampler(random_state=0)
                    X, y = ros.fit_resample(self.X_[train_index],
                                            self.y_[train_index])
                elif self._sampling == 'under':
                    rus = RandomUnderSampler(random_state=0)
                    X, y = rus.fit_resample(self.X_[train_index],
                                            self.y_[train_index])
                else:
                    X, y = self.X_[train_index], self.y_[train_index]
                candidate_clf.fit(X, y)
                if self._weighting_method == "proportional_to_mse" and not self._update:
                    weight.append(
                        self._random_mean_squared_error(self.y_[test_index]) -
                        self._mean_squared_error(
                            self.y_[test_index],
                            candidate_clf.predict_proba(self.X_[test_index])))
                elif self._weighting_method == "proportional_to_mse" and self._update:
                    weight.append(1 / (self._mean_squared_error(
                        self.y_[test_index],
                        candidate_clf.predict_proba(self.X_[test_index])) +
                                       0.001))
                elif self._weighting_method == "proportional_to_f1":
                    weight.append(
                        f1_score(self.y_[test_index],
                                 candidate_clf.predict(self.X_[test_index])))
                elif self._weighting_method == "proportional_to_g-mean":
                    weight.append(
                        g_mean(self.y_[test_index],
                               candidate_clf.predict(self.X_[test_index])))
                elif self._weighting_method == "proportional_to_bac":
                    weight.append(
                        bac(self.y_[test_index],
                            candidate_clf.predict(self.X_[test_index])))
                else:
                    raise NotImplementedError

                train_index, test_index = test_index, train_index

        # average the collected weights over all splits and folds
        return sum(weight) / len(weight)
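This method relies on StratifiedShuffleSplit from scikit-learn and the random over/under-samplers from imbalanced-learn, which the surrounding module presumably imports, for example:

# Imports assumed by the method above (not shown in the snippet).
from sklearn.model_selection import StratifiedShuffleSplit
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler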
Code Example #6
X_test_pred = np.stack((DTC_pred, LR_pred, kNN_pred), axis=-1)

# obtain predictions for ensemble models using scheme 1
Y_DTC_ensemble = DTC_ensemble.predict(X_test_pred)
Y_LR_ensemble = LR_ensemble.predict(X_test_pred)
Y_kNN_ensemble = kNN_ensemble.predict(X_test_pred)

# obtain features for scheme 2
DTC_pred = DTC.predict(X2_test)
LR_pred = LR.predict(X2_test)
kNN_pred = kNN.predict(X2_test)
X_test_pred = np.stack((DTC_pred, LR_pred, kNN_pred), axis=-1)

# obtain predictions for ensemble models using scheme 2
Y_DTC_ensemble2 = DTC_ensemble2.predict(X_test_pred)
Y_LR_ensemble2 = LR_ensemble2.predict(X_test_pred)
Y_kNN_ensemble2 = kNN_ensemble2.predict(X_test_pred)

# calculate and print balanced accuracy scores
print("\nBALANCED ACCURACY SCORES:")
print("------------------------------------------------------------")
print("DTC: ", bac(Y_test, Y_DTC))
print("LR: ", bac(Y_test, Y_LR))
print("kNN: ", bac(Y_test, Y_kNN))
print("DTC_ensemble: ", bac(Y_test, Y_DTC_ensemble))
print("LR_ensemble: ", bac(Y_test, Y_LR_ensemble))
print("kNN_ensemble: ", bac(Y_test, Y_kNN_ensemble))
print("DTC_ensemble2: ", bac(Y2_test, Y_DTC_ensemble2))
print("LR_ensemble2: ", bac(Y2_test, Y_LR_ensemble2))
print("kNN_ensemble2: ", bac(Y2_test, Y_kNN_ensemble2))
Code Example #7
 def _evaluate_q(individual, y_predicts, y_true):
     predictions = MCE.get_group(individual, y_predicts)
     y_predict = MCE._majority_voting(predictions)
     qual = bac(y_true, y_predict)
     return (qual, )