def errRate(self, x_test, y_test):
    """ Error rate for this member's local data. """
    y_pred = self.predict(x_test)
    if y_pred is None:
        return np.nan
    err_rate = compute_errorRate(y_test, y_pred)
    return err_rate
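# Hedged sketch: `compute_errorRate` is expected to come from the project's
# util module and is used throughout these functions. If it is unavailable,
# the helper below is a plausible stand-in that returns the misclassification
# fraction (1 - accuracy); the real implementation may differ.
def compute_errorRate(y_true, y_pred):
    """Fraction of samples where prediction and ground truth disagree."""
    y_true = np.asarray(y_true).ravel()
    y_pred = np.asarray(y_pred).ravel()
    return float(np.mean(y_true != y_pred))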
def scoreLocalData(self, partial_model):
    """ Score for the partial_model/global model on this member's local
    training data (1 - errRate / 2; higher is better). """
    if partial_model is None:
        return 1
    y_pred = partial_model.predict(self.__x_train)
    # validation = partial_model.model.evaluate(self.__x_train, self.__y_train)
    errRate = compute_errorRate(self.__y_train, y_pred)
    # print("errRate: ", errRate, "validation: ", validation)
    return 1 - errRate / 2
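def _score_mapping_example():
    """Hypothetical demo (not part of the original code) of the 1 - errRate / 2
    mapping used in scoreLocalData above and alliance_test_loss below: an error
    rate in [0, 1] is compressed into a score in [0.5, 1.0], so even a fully
    wrong model keeps a positive score."""
    for err in (0.0, 0.5, 1.0):
        print(err, "->", 1 - err / 2)  # 0.0 -> 1.0, 0.5 -> 0.75, 1.0 -> 0.5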
def run_experiments(X_train, y_train, X_test, y_test, X_star_train, X_star_test):
    # normalization of the training data
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    '''
    scaler = StandardScaler()
    scaler.fit(X_star_train)
    X_star_train = scaler.transform(X_star_train)
    X_star_test = scaler.transform(X_star_test)
    '''
    results = OrderedDict()

    if 1:
        y_predicted = fit_SVM(X_train, y_train, X_test)
        results["svm"] = [util.compute_errorRate(y_test, y_predicted)]

        X_train_mod = np.column_stack((X_train, X_star_train))
        X_test_mod = np.column_stack((X_test, X_star_test))
        scaler = StandardScaler()
        scaler.fit(X_train_mod)
        X_train_mod = scaler.transform(X_train_mod)
        X_test_mod = scaler.transform(X_test_mod)
        # print(X_train_mod.shape)
        y_predicted = fit_SVM(X_train_mod, y_train, X_test_mod)
        results["svm_pi"] = [util.compute_errorRate(y_test, y_predicted)]

    if 1:
        y_predicted = KT_LUPI(X_train, X_star_train, y_train, X_test)
        results["svm_kt_lupi"] = [util.compute_errorRate(y_test, y_predicted)]

    if 1:
        y_predicted = RobustKT_LUPI(X_train, X_star_train, y_train, X_test)
        results["svm_robust_kt_lupi"] = [util.compute_errorRate(y_test, y_predicted)]

    print(results)
    return results
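# Hedged sketch of the `fit_SVM` helper assumed by run_experiments: fit an
# RBF-kernel SVC on the training data and return predictions for the test
# data. The project's actual helper (e.g. with cross-validated hyperparameter
# search) may differ.
from sklearn.svm import SVC

def fit_SVM(X_train, y_train, X_test):
    clf = SVC(kernel="rbf", gamma="scale")
    clf.fit(X_train, y_train)
    return clf.predict(X_test)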
# scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
'''
scaler = StandardScaler()
scaler.fit(X_star_train)
X_star_train = scaler.transform(X_star_train)
X_star_test = scaler.transform(X_star_test)
'''
if 1:
    y_predicted = fit_SVM(X_train, y_train_label, X_test)
    print("SVM Error Rate:")
    errRateSVM[i] = util.compute_errorRate(y_test_label, y_predicted)
    print(errRateSVM[i])

if 1:
    X_train_mod = np.column_stack((X_train, X_star_train))
    X_test_mod = np.column_stack((X_test, X_star_test))
    # scaler = StandardScaler()
    scaler.fit(X_train_mod)
    X_train_mod = scaler.transform(X_train_mod)
    X_test_mod = scaler.transform(X_test_mod)
    # print(X_train_mod.shape)
    y_predicted = fit_SVM(X_train_mod, y_train_label, X_test_mod)
    print("SVM with extra features Error Rate:")
    errRateSVM_PI[i] = util.compute_errorRate(y_test_label, y_predicted)
    print(errRateSVM_PI[i])
def errRateEnsembleModel(self, x_test, y_test):
    """ Error rate of the ensemble model on the given test data. """
    y_pred = self.predictEnsemble(x_test)
    if y_pred is None:
        return np.nan
    errRate = compute_errorRate(y_test, y_pred)
    return errRate
def errRateFedEnsembleGlobalModel(self, x_test, y_test, model=None):
    """ Error rate of the federated ensemble global model on the given test data. """
    if not model:
        model = self.trainEnsemble(x_test)
    y_pred = self.predictFedEnsembleGlobalModel(model, x_test)
    errRate = compute_errorRate(y_test, y_pred)
    return errRate
def errRateGlobalModel(self, x_test, y_test, model=None):
    """ Error rate of the global model on the given test data. """
    if not model:
        model = self.trainGlobalModel()
    y_pred = self.predictGlobalModel(x_test, model)
    errRate = compute_errorRate(y_test, y_pred)
    return errRate
def alliance_test_loss(self, alliance_model):
    """ Use alliance global validation data. """
    print("alliance_test_loss")
    y_pred = alliance_model.predict(self.x_test)
    error_rate = compute_errorRate(self.y_test, y_pred)
    return 1 - error_rate / 2