import numpy as np
import torch
import torch.nn as nn

# LocalModel, DenseModel, VerticalMultiplePartyLogisticRegressionFederatedLearning,
# and FederatedLearningFixture are provided elsewhere in the repository.


def sigmoid(x):
    # assumed to be the standard logistic function, matching the
    # BCEWithLogitsLoss used by the guest below
    return 1.0 / (1.0 + np.exp(-x))


class VFLHostModel(object):
    """A passive party: holds features but no labels, and relies on the
    guest for the gradient of the joint loss."""

    def __init__(self, local_model):
        super(VFLHostModel, self).__init__()
        self.localModel = local_model
        self.feature_dim = local_model.get_output_dim()
        self.is_debug = False

        self.dense_model = DenseModel(input_dim=self.feature_dim, output_dim=1, bias=False)
        self.common_grad = None
        self.partial_common_grad = None
        self.current_global_step = None
        self.X = None

    def set_dense_model(self, dense_model):
        self.dense_model = dense_model

    def set_batch(self, X, global_step):
        self.X = X
        self.current_global_step = global_step

    def _forward_computation(self, X):
        self.A_Z = self.localModel.forward(X)
        A_U = self.dense_model.forward(self.A_Z)
        return A_U

    def _fit(self, X, y):
        back_grad = self.dense_model.backward(self.A_Z, self.common_grad)
        self.localModel.backward(X, back_grad)

    def receive_gradients(self, gradients):
        self.common_grad = gradients
        self._fit(self.X, None)

    def send_components(self):
        return self._forward_computation(self.X)

    def predict(self, X):
        return self._forward_computation(X)
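# As a reading aid, here is a minimal numpy sketch that is consistent with how
# `DenseModel` is called above: forward(x), and backward(x, grad) performing an
# SGD step and returning the gradient w.r.t. x. The name `DenseModelSketch`,
# the initialization, and the update rule are assumptions, not the repo's
# actual implementation.
class DenseModelSketch(object):
    def __init__(self, input_dim, output_dim, learning_rate=0.01, bias=True):
        self.W = np.random.randn(input_dim, output_dim) * 0.01
        self.b = np.zeros(output_dim) if bias else None
        self.learning_rate = learning_rate

    def forward(self, x):
        out = x @ self.W
        return out + self.b if self.b is not None else out

    def backward(self, x, grad):
        # grad is dLoss/dOutput; compute dLoss/dInput with the pre-update
        # weights, then apply one plain SGD step to W (and b, if present)
        back_grad = grad @ self.W.T
        self.W -= self.learning_rate * (x.T @ grad)
        if self.b is not None:
            self.b -= self.learning_rate * grad.sum(axis=0)
        return back_grad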
class VFLGuestModel(object):
    """The active party: holds the labels, aggregates all parties' logit
    shares, computes the joint loss, and distributes the shared gradient."""

    def __init__(self, local_model):
        super(VFLGuestModel, self).__init__()
        self.localModel = local_model
        self.feature_dim = local_model.get_output_dim()
        self.is_debug = False

        self.classifier_criterion = nn.BCEWithLogitsLoss()
        self.dense_model = DenseModel(input_dim=self.feature_dim, output_dim=1, bias=True)
        self.parties_grad_component_list = []
        self.current_global_step = None
        self.X = None
        self.y = None

    def set_dense_model(self, dense_model):
        self.dense_model = dense_model

    def set_batch(self, X, y, global_step):
        self.X = X
        self.y = y
        self.current_global_step = global_step

    def _fit(self, X, y):
        self.temp_K_Z = self.localModel.forward(X)
        self.K_U = self.dense_model.forward(self.temp_K_Z)
        self._compute_common_gradient_and_loss(y)
        self._update_models(X, y)

    def predict(self, X, component_list):
        temp_K_Z = self.localModel.forward(X)
        U = self.dense_model.forward(temp_K_Z)
        # sum the guest's logit share with the components sent by the hosts
        for comp in component_list:
            U = U + comp
        return sigmoid(np.sum(U, axis=1))

    def receive_components(self, component_list):
        for party_component in component_list:
            self.parties_grad_component_list.append(party_component)

    def fit(self):
        self._fit(self.X, self.y)
        self.parties_grad_component_list = []

    def _compute_common_gradient_and_loss(self, y):
        # aggregate the logit shares of all parties
        U = self.K_U
        for grad_comp in self.parties_grad_component_list:
            U = U + grad_comp

        U = torch.tensor(U, requires_grad=True).float()
        y = torch.tensor(y)
        y = y.type_as(U)
        class_loss = self.classifier_criterion(U, y)
        grads = torch.autograd.grad(outputs=class_loss, inputs=U)
        self.top_grads = grads[0].numpy()
        self.loss = class_loss.item()

    def send_gradients(self):
        return self.top_grads

    def _update_models(self, X, y):
        back_grad = self.dense_model.backward(self.temp_K_Z, self.top_grads)
        self.localModel.backward(X, back_grad)

    def get_loss(self):
        return self.loss
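# One coordinated training step between the guest (label holder) and a single
# host, spelled out without the federation wrapper used below. The ordering
# matters: the host's forward component must reach the guest before fit(), and
# the shared gradient flows back to the host afterwards. This helper is a
# sketch for illustration; its name and arguments are assumptions.
def vfl_train_step_sketch(guest, host, Xa_batch, Xb_batch, y_batch, step):
    guest.set_batch(Xa_batch, y_batch, global_step=step)
    host.set_batch(Xb_batch, global_step=step)
    # the host pushes its forward component (logit share) to the guest
    guest.receive_components([host.send_components()])
    # the guest aggregates the logit shares, computes the joint BCE loss,
    # and updates its own dense and local models
    guest.fit()
    # the shared gradient is sent back so the host can update its models too
    host.receive_gradients(guest.send_gradients())
    return guest.get_loss()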
def run_experiment(train_data, test_data, batch_size, learning_rate, epoch):
    Xa_train, Xb_train, Xc_train, y_train = train_data
    Xa_test, Xb_test, Xc_test, y_test = test_data

    print("################################ Wire Federated Models ############################")

    # create local models for party A, party B, and party C
    party_a_local_model = LocalModel(input_dim=Xa_train.shape[1], output_dim=60, learning_rate=learning_rate)
    party_b_local_model = LocalModel(input_dim=Xb_train.shape[1], output_dim=60, learning_rate=learning_rate)
    party_c_local_model = LocalModel(input_dim=Xc_train.shape[1], output_dim=60, learning_rate=learning_rate)

    # create an LR model for each of party A, party B, and party C. Each party holds a part of
    # the whole LR model, and only party A has the bias term since only party A has the labels.
    party_a_dense_model = DenseModel(party_a_local_model.get_output_dim(), 1, learning_rate=learning_rate, bias=True)
    party_b_dense_model = DenseModel(party_b_local_model.get_output_dim(), 1, learning_rate=learning_rate, bias=False)
    party_c_dense_model = DenseModel(party_c_local_model.get_output_dim(), 1, learning_rate=learning_rate, bias=False)

    partyA = VFLGuestModel(local_model=party_a_local_model)
    partyA.set_dense_model(party_a_dense_model)
    partyB = VFLHostModel(local_model=party_b_local_model)
    partyB.set_dense_model(party_b_dense_model)
    partyC = VFLHostModel(local_model=party_c_local_model)
    partyC.set_dense_model(party_c_dense_model)

    party_B_id = "B"
    party_C_id = "C"
    federatedLearning = VerticalMultiplePartyLogisticRegressionFederatedLearning(partyA)
    federatedLearning.add_party(id=party_B_id, party_model=partyB)
    federatedLearning.add_party(id=party_C_id, party_model=partyC)

    print("################################ Train Federated Models ############################")

    fl_fixture = FederatedLearningFixture(federatedLearning)

    train_data = {federatedLearning.get_main_party_id(): {"X": Xa_train, "Y": y_train},
                  "party_list": {party_B_id: Xb_train,
                                 party_C_id: Xc_train}}
    test_data = {federatedLearning.get_main_party_id(): {"X": Xa_test, "Y": y_test},
                 "party_list": {party_B_id: Xb_test,
                                party_C_id: Xc_test}}

    fl_fixture.fit(train_data=train_data, test_data=test_data, epochs=epoch, batch_size=batch_size)
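# Hypothetical invocation of the three-party experiment above with synthetic,
# vertically partitioned data. The helper name, sample count, feature widths,
# and label shape are arbitrary assumptions chosen only to exercise the wiring.
def _synthetic_three_party_data(n_samples=256):
    Xa = np.random.randn(n_samples, 8)
    Xb = np.random.randn(n_samples, 4)
    Xc = np.random.randn(n_samples, 4)
    y = np.random.randint(0, 2, (n_samples, 1)).astype(np.float32)
    return Xa, Xb, Xc, y

# run_experiment(train_data=_synthetic_three_party_data(),
#                test_data=_synthetic_three_party_data(64),
#                batch_size=32, learning_rate=0.01, epoch=5)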
def run_experiment(train_data, test_data, batch_size, learning_rate, epoch):
    print("hyper-parameters:")
    print("batch size: {0}".format(batch_size))
    print("learning rate: {0}".format(learning_rate))

    Xa_train, Xb_train, y_train = train_data
    Xa_test, Xb_test, y_test = test_data

    print("################################ Wire Federated Models ############################")

    # create local models for both party A and party B
    party_a_local_model = LocalModel(input_dim=Xa_train.shape[1], output_dim=10, learning_rate=learning_rate)
    party_b_local_model = LocalModel(input_dim=Xb_train.shape[1], output_dim=20, learning_rate=learning_rate)

    # create an LR model for both party A and party B. Each party holds a part of the whole
    # LR model, and only party A has the bias term since only party A has the labels.
    party_a_dense_model = DenseModel(party_a_local_model.get_output_dim(), 1, learning_rate=learning_rate, bias=True)
    party_b_dense_model = DenseModel(party_b_local_model.get_output_dim(), 1, learning_rate=learning_rate, bias=False)
    partyA = VFLGuestModel(local_model=party_a_local_model)
    partyA.set_dense_model(party_a_dense_model)
    partyB = VFLHostModel(local_model=party_b_local_model)
    partyB.set_dense_model(party_b_dense_model)

    party_B_id = "B"
    federatedLearning = VerticalMultiplePartyLogisticRegressionFederatedLearning(partyA)
    federatedLearning.add_party(id=party_B_id, party_model=partyB)
    federatedLearning.set_debug(is_debug=False)

    print("################################ Train Federated Models ############################")

    fl_fixture = FederatedLearningFixture(federatedLearning)

    # only party A has labels (i.e., Y); the other parties only have features (i.e., X).
    # 'party_list' stores the X of all other parties. Since this is two-party VFL,
    # 'party_list' only stores the X of party B.
    train_data = {federatedLearning.get_main_party_id(): {"X": Xa_train, "Y": y_train},
                  "party_list": {party_B_id: Xb_train}}
    test_data = {federatedLearning.get_main_party_id(): {"X": Xa_test, "Y": y_test},
                 "party_list": {party_B_id: Xb_test}}

    fl_fixture.fit(train_data=train_data, test_data=test_data, epochs=epoch, batch_size=batch_size)
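# "Vertical" partitioning means the parties hold different feature columns of
# the same samples, and only party A keeps the labels. A hypothetical helper
# that derives the two parties' views from a single feature matrix (the helper
# name and the split point are arbitrary assumptions):
def vertical_split_sketch(X, y, n_features_party_a):
    Xa = X[:, :n_features_party_a]   # party A: first block of columns (plus labels y)
    Xb = X[:, n_features_party_a:]   # party B: remaining columns, no labels
    return Xa, Xb, y

# e.g., give the first 10 columns to party A and the rest to party B:
# Xa_train, Xb_train, y_train = vertical_split_sketch(X_train, y_train, 10)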