def test_train_config_with_jit_trace(hook, workers):  # pragma: no cover
    """End-to-end TrainConfig flow with a jit-traced model.

    Traces a small MLP, wraps model and loss fn with ids, sends both to the
    remote worker ``alice``, trains remotely via ``alice.fit`` and asserts
    the loss on the toy dataset decreased.
    """
    alice = workers["alice"]
    me = workers["me"]

    data = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
    target = torch.tensor([[1], [0], [1], [0]])

    dataset = sy.BaseDataset(data, target)
    alice.add_dataset(dataset, key="vectors")

    @hook.torch.jit.script
    def loss_fn(real, pred):
        return ((real.float() - pred.float()) ** 2).mean()

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # Fully-qualified torch.nn for consistency with the other tests in
            # this file (the original used bare `nn` / `F` aliases).
            self.fc1 = torch.nn.Linear(2, 3)
            self.fc2 = torch.nn.Linear(3, 2)
            self.fc3 = torch.nn.Linear(2, 1)

        def forward(self, x):
            x = torch.nn.functional.relu(self.fc1(x))
            x = torch.nn.functional.relu(self.fc2(x))
            x = self.fc3(x)
            return x

    model_untraced = Net()
    model = torch.jit.trace(model_untraced, data)

    model_with_id = pointers.ObjectWrapper(model, sy.ID_PROVIDER.pop())
    loss_fn_with_id = pointers.ObjectWrapper(loss_fn, sy.ID_PROVIDER.pop())

    model_ptr = me.send(model_with_id, alice)
    # Sending registers the loss fn on alice; the returned pointer is unused.
    loss_fn_ptr = me.send(loss_fn_with_id, alice)

    print("Evaluation before training")
    pred = model(data)
    loss_before = loss_fn(real=target, pred=pred)
    print("Loss: {}".format(loss_before))

    # Create and send train config
    train_config = sy.TrainConfig(model=model, loss_fn=loss_fn, batch_size=2)
    train_config.send(alice)

    for epoch in range(5):
        loss = alice.fit(dataset="vectors")
        print("-" * 50)
        print("Iteration %s: alice's loss: %s" % (epoch, loss))

    print("Evaluation after training:")
    new_model = model_ptr.get()
    pred = new_model.obj(data)
    loss_after = loss_fn(real=target, pred=pred)
    print("Loss: {}".format(loss_after))

    assert loss_after < loss_before
def test_fit():
    """Train a traced model locally through ``FederatedClient.fit`` and
    verify the loss on the small toy dataset decreases."""
    data = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
    target = torch.tensor([[1], [0], [1], [0]])

    fed_client = federated.FederatedClient()
    fed_client.add_dataset(sy.BaseDataset(data, target), key="vectors")

    def loss_fn(real, pred):
        return ((real.float() - pred.float()) ** 2).mean()

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = torch.nn.Linear(2, 3)
            self.fc2 = torch.nn.Linear(3, 2)
            self.fc3 = torch.nn.Linear(2, 1)

        def forward(self, x):
            x = torch.nn.functional.relu(self.fc1(x))
            x = torch.nn.functional.relu(self.fc2(x))
            return self.fc3(x)

    model = torch.jit.trace(Net(), data)

    model_id = 0
    loss_id = 1
    model_ow = pointers.ObjectWrapper(obj=model, id=model_id)
    loss_ow = pointers.ObjectWrapper(obj=loss_fn, id=loss_id)

    print("Evaluation before training")
    loss_before = loss_fn(real=target, pred=model(data))
    print("Loss: {}".format(loss_before))

    # Create and send train config
    train_config = sy.TrainConfig(
        batch_size=1, model=None, loss_fn=None, model_id=model_id, loss_fn_id=loss_id
    )

    fed_client.set_obj(model_ow)
    fed_client.set_obj(loss_ow)
    fed_client.set_obj(train_config)

    for epoch in range(5):
        loss = fed_client.fit(dataset_key="vectors")
        print("-" * 50)
        print("Iteration %s: alice's loss: %s" % (epoch, loss))

    print("Evaluation after training:")
    trained = fed_client.get_obj(model_id)
    loss_after = loss_fn(real=target, pred=trained.obj(data))
    print("Loss: {}".format(loss_after))

    assert loss_after < loss_before
def test_call_callable_pointer(workers):
    """A callable pointer held by bob should invoke the function that was
    registered on alice and return its result."""
    def foo(x):
        return x + 2

    alice = workers["alice"]
    bob = workers["bob"]

    id_alice = 100
    id_bob = 200

    wrapped_foo = pointers.ObjectWrapper(id=id_alice, obj=foo)
    alice.register_obj(wrapped_foo, id_alice)

    foo_ptr = callable_pointer.create_callable_pointer(
        id=id_bob,
        id_at_location=id_alice,
        location=alice,
        owner=bob,
        tags="tags",
        description="description",
        register_pointer=True,
    )

    assert foo_ptr(4) == 6
def test_serde_object_wrapper_traced_module():
    """Round-trip an ObjectWrapper holding a jit-traced module through serde
    and check predictions and the wrapper id survive."""
    data = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]])

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = torch.nn.Linear(2, 3)

        def forward(self, x):
            return torch.nn.functional.relu(self.fc1(x))

    traced = torch.jit.trace(Net(), data)
    wrapper = pointers.ObjectWrapper(traced, id=200)

    payload = serde.serialize(wrapper)
    received = serde.deserialize(payload)

    # The deserialized module must predict identically and keep its id.
    assert (traced(data) == received.obj(data)).all()
    assert wrapper.id == received.id
def test_serde_object_wrapper_int():
    """Round-trip an ObjectWrapper holding a plain int through serde."""
    wrapper = pointers.ObjectWrapper(4, id=100)

    payload = serde.serialize(wrapper)
    received = serde.deserialize(payload)

    assert received.obj == wrapper.obj
    assert received.id == wrapper.id
def test_send_jit_scriptmodule(hook, workers):  # pragma: no cover
    """Send a scripted function to bob and call it via the returned pointer."""
    bob = workers["bob"]

    @torch.jit.script
    def foo(x):
        return x + 2

    wrapped = pointers.ObjectWrapper(obj=foo, id=99)
    remote_foo = hook.local_worker.send(wrapped, bob)

    assert remote_foo(torch.tensor(4)) == torch.tensor(6)
def _detail_object_wrapper(
    worker: AbstractWorker, obj_wrapper_tuple: tuple
) -> pointers.ObjectWrapper:
    """Rebuild an ObjectWrapper from its simplified tuple form.

    Args:
        worker: The worker performing the detail operation.
        obj_wrapper_tuple: A tuple of (id, simplified_obj); the annotation was
            previously ``str``, which contradicted the tuple indexing below.

    Returns:
        An ObjectWrapper with the original id and the detailed object.
    """
    obj_wrapper = pointers.ObjectWrapper(
        id=obj_wrapper_tuple[0], obj=_detail(worker, obj_wrapper_tuple[1])
    )
    return obj_wrapper
def test_fit():
    """Fit a traced classifier on Gaussian-mixture toy data through
    ``FederatedClient`` and assert the cross-entropy loss decreases."""
    data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)

    fed_client = federated.FederatedClient()
    dataset_key = "gaussian_mixture"
    fed_client.add_dataset(sy.BaseDataset(data, target), key=dataset_key)

    def loss_fn(target, pred):
        return torch.nn.functional.cross_entropy(input=pred, target=target)

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = torch.nn.Linear(2, 3)
            self.fc2 = torch.nn.Linear(3, 2)
            torch.nn.init.xavier_normal_(self.fc1.weight)
            torch.nn.init.xavier_normal_(self.fc2.weight)

        def forward(self, x):
            x = torch.nn.functional.relu(self.fc1(x))
            return torch.nn.functional.relu(self.fc2(x))

    model = torch.jit.trace(Net(), data)

    model_id = 0
    loss_id = 1
    model_ow = pointers.ObjectWrapper(obj=model, id=model_id)
    loss_ow = pointers.ObjectWrapper(obj=loss_fn, id=loss_id)

    loss_before = loss_fn(target=target, pred=model(data))
    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Loss before training: {}".format(loss_before))

    # Create and send train config
    train_config = sy.TrainConfig(
        batch_size=8,
        model=None,
        loss_fn=None,
        model_id=model_id,
        loss_fn_id=loss_id,
        lr=0.05,
        weight_decay=0.01,
    )

    fed_client.set_obj(model_ow)
    fed_client.set_obj(loss_ow)
    fed_client.set_obj(train_config)
    fed_client.optimizer = None

    for curr_round in range(12):
        loss = fed_client.fit(dataset_key=dataset_key)
        if PRINT_IN_UNITTESTS and curr_round % 4 == 0:  # pragma: no cover
            print("-" * 50)
            print("Iteration %s: alice's loss: %s" % (curr_round, loss))

    trained = fed_client.get_obj(model_id)
    loss_after = loss_fn(target=target, pred=trained.obj(data))
    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Loss after training: {}".format(loss_after))

    assert loss_after < loss_before
def test_fit(fit_dataset_key, epochs):
    """Parameterized fit test on Gaussian-mixture toy data.

    Trains via the helper ``train_model``; when the key trained on matches the
    dataset actually available, the loss must decrease (retrying with more
    rounds once if the first short run did not reduce it).
    """
    data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)

    fed_client = federated.FederatedClient()
    dataset_key = "gaussian_mixture"
    fed_client.add_dataset(sy.BaseDataset(data, target), key=dataset_key)

    def loss_fn(target, pred):
        return torch.nn.functional.cross_entropy(input=pred, target=target)

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = torch.nn.Linear(2, 3)
            self.fc2 = torch.nn.Linear(3, 2)
            torch.nn.init.xavier_normal_(self.fc1.weight)
            torch.nn.init.xavier_normal_(self.fc2.weight)

        def forward(self, x):
            x = torch.nn.functional.relu(self.fc1(x))
            return torch.nn.functional.relu(self.fc2(x))

    model = torch.jit.trace(Net(), data)

    model_id = 0
    loss_id = 1

    loss_before = loss_fn(target=target, pred=model(data))
    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Loss before training: {}".format(loss_before))

    # Create and send train config
    train_config = sy.TrainConfig(
        batch_size=8,
        model=None,
        loss_fn=None,
        model_id=model_id,
        loss_fn_id=loss_id,
        optimizer_args={"lr": 0.05, "weight_decay": 0.01},
        epochs=epochs,
    )

    fed_client.set_obj(pointers.ObjectWrapper(obj=model, id=model_id))
    fed_client.set_obj(pointers.ObjectWrapper(obj=loss_fn, id=loss_id))
    fed_client.set_obj(train_config)
    fed_client.optimizer = None

    train_model(fed_client, fit_dataset_key, available_dataset_key=dataset_key, nr_rounds=3)

    if dataset_key == fit_dataset_key:
        loss_after = evaluate_model(fed_client, model_id, loss_fn, data, target)
        if PRINT_IN_UNITTESTS:  # pragma: no cover
            print("Loss after training: {}".format(loss_after))

        if loss_after >= loss_before:  # pragma: no cover
            # Short runs can occasionally fail to converge; train longer once.
            if PRINT_IN_UNITTESTS:
                print("Loss not reduced, train more: {}".format(loss_after))
            train_model(
                fed_client, fit_dataset_key, available_dataset_key=dataset_key, nr_rounds=10
            )
            loss_after = evaluate_model(fed_client, model_id, loss_fn, data, target)

        assert loss_after < loss_before
def _wrap_and_send_obj(self, obj, location):
    """Wrap an object in an ObjectWrapper with a fresh id and send it to ``location``.

    Args:
        obj: The object to wrap and send.
        location: The worker the wrapped object is sent to.

    Returns:
        A tuple ``(obj_ptr, obj_id)``: the pointer to the remote object and
        the object's id at the remote location.
    """
    obj_with_id = pointers.ObjectWrapper(id=sy.ID_PROVIDER.pop(), obj=obj)
    obj_ptr = self.owner.send(obj_with_id, location)
    obj_id = obj_ptr.id_at_location
    return obj_ptr, obj_id
def test_evaluate():  # pragma: no cover
    """Evaluate an iris classifier before and after training via FederatedClient.

    Uses fixed fc1 weights/bias so the pre-training evaluation is deterministic,
    then trains for 50 rounds and checks accuracy and the prediction histogram
    both improve.
    """
    data, target = utils.iris_data_partial()

    fed_client = federated.FederatedClient()
    dataset = sy.BaseDataset(data, target)
    dataset_key = "iris"
    fed_client.add_dataset(dataset, key=dataset_key)

    def loss_fn(pred, target):
        return torch.nn.functional.cross_entropy(input=pred, target=target)

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = torch.nn.Linear(4, 3)

        def forward(self, x):
            x = torch.nn.functional.relu(self.fc1(x))
            return x

    model_untraced = Net()
    # Pin the initial weights so the "before" evaluation is reproducible.
    with torch.no_grad():
        model_untraced.fc1.weight.set_(
            torch.tensor(
                [
                    [0.0160, 1.3753, -0.1202, -0.9129],
                    [0.1539, 0.3092, 0.0749, 0.2142],
                    [0.0984, 0.6248, 0.0274, 0.1735],
                ]
            )
        )
        model_untraced.fc1.bias.set_(torch.tensor([0.3477, 0.2970, -0.0799]))

    model = torch.jit.trace(model_untraced, data)
    model_id = 0
    model_ow = pointers.ObjectWrapper(obj=model, id=model_id)
    loss_id = 1
    loss_ow = pointers.ObjectWrapper(obj=loss_fn, id=loss_id)

    pred = model(data)
    loss_before = loss_fn(target=target, pred=pred)
    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Loss before training: {}".format(loss_before))

    # Create and send train config (epochs=1, no optimizer args yet).
    train_config = sy.TrainConfig(
        batch_size=8,
        model=None,
        loss_fn=None,
        model_id=model_id,
        loss_fn_id=loss_id,
        optimizer_args=None,
        epochs=1,
    )
    fed_client.set_obj(model_ow)
    fed_client.set_obj(loss_ow)
    fed_client.set_obj(train_config)
    fed_client.optimizer = None

    # Baseline evaluation before any training.
    result = fed_client.evaluate(
        dataset_key=dataset_key, return_histograms=True, nr_bins=3, return_loss=True
    )
    test_loss_before = result["loss"]
    correct_before = result["nr_correct_predictions"]
    len_dataset = result["nr_predictions"]
    hist_pred_before = result["histogram_predictions"]
    hist_target = result["histogram_target"]

    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Evaluation result before training: {}".format(result))

    assert len_dataset == 30
    assert (hist_target == [10, 10, 10]).all()

    # Replace the train config with one that actually trains (SGD, 2 epochs).
    train_config = sy.TrainConfig(
        batch_size=8,
        model=None,
        loss_fn=None,
        model_id=model_id,
        loss_fn_id=loss_id,
        optimizer="SGD",
        optimizer_args={"lr": 0.01},
        shuffle=True,
        epochs=2,
    )
    fed_client.set_obj(train_config)
    train_model(fed_client, dataset_key, available_dataset_key=dataset_key, nr_rounds=50)

    # Evaluation after training.
    result = fed_client.evaluate(
        dataset_key=dataset_key, return_histograms=True, nr_bins=3, return_loss=True
    )
    test_loss_after = result["loss"]
    correct_after = result["nr_correct_predictions"]
    len_dataset = result["nr_predictions"]
    hist_pred_after = result["histogram_predictions"]
    hist_target = result["histogram_target"]

    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Evaluation result: {}".format(result))

    assert len_dataset == 30
    assert (hist_target == [10, 10, 10]).all()
    assert correct_after > correct_before
    # The prediction histogram should have moved closer to the target histogram.
    assert torch.norm(torch.tensor(hist_target - hist_pred_after)) < torch.norm(
        torch.tensor(hist_target - hist_pred_before)
    )