Example #1
def test_federated_dataset_search(workers):

    bob = workers["bob"]
    alice = workers["alice"]

    grid = sy.PrivateGridNetwork(*[bob, alice])

    train_bob = th.Tensor(th.zeros(1000, 100)).tag("data").send(bob)
    target_bob = th.Tensor(th.zeros(1000, 100)).tag("target").send(bob)

    train_alice = th.Tensor(th.zeros(1000, 100)).tag("data").send(alice)
    target_alice = th.Tensor(th.zeros(1000, 100)).tag("target").send(alice)

    data = grid.search("data")
    target = grid.search("target")

    datasets = [
        BaseDataset(data["bob"][0], target["bob"][0]),
        BaseDataset(data["alice"][0], target["alice"][0]),
    ]

    fed_dataset = sy.FederatedDataset(datasets)
    train_loader = sy.FederatedDataLoader(fed_dataset,
                                          batch_size=4,
                                          shuffle=False,
                                          drop_last=False)

    counter = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        counter += 1

    assert counter == len(train_loader), f"{counter} == {len(train_loader)}"
Example #2
def test_federated_dataloader_shuffle(workers):
    bob = workers["bob"]
    alice = workers["alice"]
    datasets = [
        fl.BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        fl.BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5,
                                                           6])).send(alice),
    ]
    fed_dataset = sy.FederatedDataset(datasets)

    fdataloader = sy.FederatedDataLoader(fed_dataset,
                                         batch_size=2,
                                         shuffle=True)
    for epoch in range(3):
        counter = 0
        for batch_idx, (data, target) in enumerate(fdataloader):
            if counter < 1:  # one batch for bob, two batches for alice (batch_size == 2)
                assert (
                    data.location.id == "bob"
                ), f"id should be bob, counter = {counter}, epoch = {epoch}"
            else:
                assert (
                    data.location.id == "alice"
                ), f"id should be alice, counter = {counter}, epoch = {epoch}"
            counter += 1
        assert counter == len(fdataloader), f"{counter} == {len(fdataloader)}"

    num_iterators = 2
    fdataloader = sy.FederatedDataLoader(fed_dataset,
                                         batch_size=2,
                                         num_iterators=num_iterators,
                                         shuffle=True)
    assert (fdataloader.num_iterators == num_iterators -
            1), f"{fdataloader.num_iterators} == {num_iterators - 1}"
Example #3
def test_federated_dataloader(workers):
    bob = workers["bob"]
    alice = workers["alice"]
    datasets = [
        fl.BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        fl.BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5,
                                                           6])).send(alice),
    ]
    fed_dataset = sy.FederatedDataset(datasets)

    fdataloader = sy.FederatedDataLoader(fed_dataset, batch_size=2)
    counter = 0
    for batch_idx, (data, target) in enumerate(fdataloader):
        counter += 1

    assert counter == len(fdataloader), f"{counter} == {len(fdataloader)}"

    fdataloader = sy.FederatedDataLoader(fed_dataset,
                                         batch_size=2,
                                         drop_last=True)
    counter = 0
    for batch_idx, (data, target) in enumerate(fdataloader):
        counter += 1

    assert counter == len(fdataloader), f"{counter} == {len(fdataloader)}"
Example #4
    def construct_FL_loader(data_pointer, **kwargs):
        """ Cast paired data & labels into configured tensor dataloaders
        Args:
            dataset (list(sy.BaseDataset)): A tuple of X features & y labels
            kwargs: Additional parameters to configure PyTorch's Dataloader
        Returns:
            Configured dataloader (th.utils.data.DataLoader)
        """
        federated_dataset = sy.FederatedDataset(data_pointer)

#         print(federated_dataset)

        federated_data_loader = sy.FederatedDataLoader(
            federated_dataset,
            batch_size=(
                model_hyperparams['batch_size']
                if model_hyperparams['batch_size']
                else len(federated_dataset)
            ),
            shuffle=True,
            iter_per_worker=True, # for subsequent parallelization
            **kwargs
        )

        return federated_data_loader
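    # A minimal, hypothetical call of construct_FL_loader defined above, assuming the
    # enclosing scope provides model_hyperparams and two VirtualWorkers bob and alice
    # (all names below are illustrative, not part of the original snippet).
    model_hyperparams = {'batch_size': 32}

    data_bob = th.randn(100, 10).send(bob)
    target_bob = th.randint(0, 2, (100,)).send(bob)
    data_alice = th.randn(100, 10).send(alice)
    target_alice = th.randint(0, 2, (100,)).send(alice)

    train_pointers = [
        sy.BaseDataset(data_bob, target_bob),
        sy.BaseDataset(data_alice, target_alice),
    ]

    train_loader = construct_FL_loader(train_pointers)
    for batches in train_loader:
        # With iter_per_worker=True, each iteration yields a dict mapping every
        # worker to its (data, target) batch of pointers.
        for worker, (data, target) in batches.items():
            pass  # the remote forward/backward pass would go here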
Example #5
def test_federated_dataset(workers):

    bob = workers["bob"]
    alice = workers["alice"]

    grid = sy.VirtualGrid(*[bob, alice])

    train_bob = th.Tensor(th.zeros(1000, 100)).tag("data").send(bob)
    target_bob = th.Tensor(th.zeros(1000, 100)).tag("target").send(bob)

    train_alice = th.Tensor(th.zeros(1000, 100)).tag("data").send(alice)
    target_alice = th.Tensor(th.zeros(1000, 100)).tag("target").send(alice)

    data, _ = grid.search("data")
    target, _ = grid.search("target")

    dataset = sy.FederatedDataset(data, target)
    train_loader = sy.FederatedDataLoader(dataset,
                                          batch_size=4,
                                          shuffle=False,
                                          drop_last=False)

    epochs = 2
    for epoch in range(1, epochs + 1):
        for batch_idx, (data, target) in enumerate(train_loader):
            pass
Example #6
def dataset_federate_noniid(dataset, workers, Ratio=[1, 1, 1], net='NOT CNN'):
    """
    Add a method to easily transform a torch.Dataset or a sy.BaseDataset
    into a sy.FederatedDataset. The dataset given is split in len(workers)
    part and sent to each workers
    """
    logger.info(
        f"Scanning and sending data to {', '.join([w.id for w in workers])}..."
    )
    datasets = []
    N = 0
    dataset_list = list(dataset)
    for n in range(0, len(workers)):
        ratio = Ratio[n] / sum(Ratio)  # compute this worker's share of the data
        num = round(ratio * len(dataset))  # number of samples to draw for this worker
        Subset = dataset_list[N:N + num]  # take this worker's slice of the data
        N = N + num
        data = []
        targets = []
        for d, t in Subset:
            data.append(d)
            targets.append(t)

        data = torch.cat(data)
        if net == 'CNN':
            data = torch.unsqueeze(data, 1)
        targets = torch.tensor(targets)
        worker = workers[n]
        logger.debug("Sending data to worker %s", worker.id)
        data = data.send(worker)
        targets = targets.send(worker)
        datasets.append(sy.BaseDataset(data, targets))  # .send(worker)

    logger.debug("Done!")
    return sy.FederatedDataset(datasets)
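# A minimal usage sketch for dataset_federate_noniid above, assuming torchvision
# MNIST and three virtual workers; the worker names and the 6:3:1 ratio are
# illustrative, not taken from the original snippet.
import torch
import syft as sy
from torchvision import datasets, transforms

hook = sy.TorchHook(torch)
workers = [sy.VirtualWorker(hook, id=name) for name in ("w1", "w2", "w3")]

mnist = datasets.MNIST("../data", train=True, download=True,
                       transform=transforms.ToTensor())

# Split MNIST 6:3:1 across the three workers and wrap the parts for federated training.
fed_dataset = dataset_federate_noniid(mnist, workers, Ratio=[6, 3, 1], net='CNN')
train_loader = sy.FederatedDataLoader(fed_dataset, batch_size=64, shuffle=True)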
Example #7
def _distribute_among_workers(dataset, workers):
    datasets = []

    for i, data in dataset.items():
        x_train, y_train = _data_target_split(data)

        data = x_train.send(workers[i])
        targets = y_train.send(workers[i])
        datasets.append(sy.BaseDataset(data, targets))

    return sy.FederatedDataset(datasets)
Example #8
def get_federated_dataset(data, users, context_size, hook):
    users_data = []
    workers = []
    for user in users:
        user_worker = sy.VirtualWorker(hook, id=user)
        cur_data = data[data.user == user]
        X, Y = extend_data(cur_data.X, cur_data.Y, context_size)
        X = th.tensor(X)
        Y = th.tensor(Y)
        users_data.append(sy.BaseDataset(X, Y).send(user_worker))
        workers.append(user_worker)
    return sy.FederatedDataset(users_data), workers
Example #9
def test_illegal_get(workers):
    """test getting error message when calling .get() on a dataset that's a part of fedratedDataset object"""
    bob = workers["bob"]
    alice = workers["alice"]

    alice_base_dataset = BaseDataset(th.tensor([3, 4, 5, 6]),
                                     th.tensor([3, 4, 5, 6]))
    datasets = [
        BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        alice_base_dataset.send(alice),
    ]
    fed_dataset = sy.FederatedDataset(datasets)
    with pytest.raises(ValueError):
        fed_dataset["alice"].get()
Example #10
def test_federated_dataloader_one_worker(workers):
    bob = workers["bob"]

    datasets = [
        fl.BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5,
                                                           6])).send(bob)
    ]

    fed_dataset = sy.FederatedDataset(datasets)
    num_iterators = len(datasets)
    fdataloader = sy.FederatedDataLoader(fed_dataset,
                                         batch_size=2,
                                         shuffle=True)
    assert fdataloader.num_iterators == 1, f"{fdataloader.num_iterators} == {1}"
Example #11
def test_get_dataset(workers):
    bob = workers["bob"]
    alice = workers["alice"]

    alice_base_dataset = BaseDataset(th.tensor([3, 4, 5, 6]),
                                     th.tensor([3, 4, 5, 6]))
    datasets = [
        BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        alice_base_dataset.send(alice),
    ]
    fed_dataset = sy.FederatedDataset(datasets)
    dataset = fed_dataset.get_dataset("alice")

    assert len(fed_dataset) == 2
    assert len(dataset) == 4
Example #12
def test_extract_batches_per_worker(workers):
    bob = workers["bob"]
    alice = workers["alice"]

    datasets = [
        fl.BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        fl.BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5, 6])).send(alice),
    ]
    fed_dataset = sy.FederatedDataset(datasets)

    fdataloader = sy.FederatedDataLoader(fed_dataset, batch_size=2, shuffle=True)

    batches = utils.extract_batches_per_worker(fdataloader)

    assert len(batches.keys()) == len(
        datasets
    ), "each worker should appear as key in the batches dictionary"
Example #13
    def collect_datasets(self, grid):
        loaders = []
        tags = ['train', 'valid', 'test']
        for tag in tags:
            found_X = grid.search("#X", f"#{tag}")
            found_y = grid.search("#Y", f"#{tag}")

            datasets = []
            for worker in found_X.keys():
                datasets.append(
                    sy.BaseDataset(found_X[worker][0], found_y[worker][0]))

            dataset = sy.FederatedDataset(datasets)
            loaders.append(
                sy.FederatedDataLoader(
                    dataset, batch_size=self.model_config.batch_size))

        return loaders
Example #14
def _distribute_among_workers(samplers, dataset, workers):

    datasets = []

    # Each worker has its own sampler; len(samplers) == len(workers)
    for idx, sampler in enumerate(samplers):

        loader = DataLoader(dataset,
                            batch_size=len(sampler),
                            shuffle=False,
                            sampler=sampler)

        # Loader always contains only one batch (because batch_size=len(sampler))
        for batch in loader:
            data = batch[0].send(workers[idx].id)
            targets = batch[1].send(workers[idx].id)
            datasets.append(sy.BaseDataset(data, targets))

    return sy.FederatedDataset(datasets)
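# A minimal sketch of how the sampler-based helper above might be driven, assuming
# two virtual workers and a small TensorDataset (all names below are illustrative).
import torch as th
import syft as sy
from torch.utils.data import TensorDataset, SubsetRandomSampler

hook = sy.TorchHook(th)
workers = [sy.VirtualWorker(hook, id="w1"), sy.VirtualWorker(hook, id="w2")]

X = th.randn(100, 10)
y = th.randint(0, 2, (100,))
dataset = TensorDataset(X, y)

# One sampler per worker: the first half of the indices goes to w1, the rest to w2.
samplers = [SubsetRandomSampler(list(range(0, 50))),
            SubsetRandomSampler(list(range(50, 100)))]

fed_dataset = _distribute_among_workers(samplers, dataset, workers)
train_loader = sy.FederatedDataLoader(fed_dataset, batch_size=16, shuffle=True)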
Example #15
def test_federated_dataloader_num_iterators(workers):
    bob = workers["bob"]
    alice = workers["alice"]
    james = workers["james"]
    datasets = [
        fl.BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        fl.BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5,
                                                           6])).send(alice),
        fl.BaseDataset(th.tensor([7, 8, 9, 10]), th.tensor([7, 8, 9,
                                                            10])).send(james),
    ]

    fed_dataset = sy.FederatedDataset(datasets)
    num_iterators = len(datasets)
    fdataloader = sy.FederatedDataLoader(fed_dataset,
                                         batch_size=2,
                                         num_iterators=num_iterators,
                                         shuffle=True)
    assert (fdataloader.num_iterators == num_iterators -
            1), f"{fdataloader.num_iterators} == {num_iterators - 1}"
    counter = 0
    for batch_idx, batches in enumerate(fdataloader):
        assert (len(batches.keys()) == num_iterators -
                1), f"{len(batches.keys())} == {num_iterators - 1}"
        if batch_idx < 1:
            data_bob, target_bob = batches[bob]
            assert data_bob.location.id == "bob", "id should be bob, batch_idx = {0}".format(
                batch_idx)
        else:  # bob is replaced by james
            data_james, target_james = batches[james]
            assert data_james.location.id == "james", "id should be james, batch_idx = {0}".format(
                batch_idx)
        if batch_idx < 2:
            data_alice, target_alice = batches[alice]
            assert data_alice.location.id == "alice", "id should be alice, batch_idx = {0}".format(
                batch_idx)
        counter += 1
    epochs = num_iterators - 1
    assert counter * (num_iterators - 1) == epochs * len(
        fdataloader), f"{counter * (num_iterators - 1)} == {epochs * len(fdataloader)}"
Example #16
def test_federated_dataset(workers):
    bob = workers["bob"]
    alice = workers["alice"]

    alice_base_dataset = BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5, 6]))
    datasets = [
        BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        alice_base_dataset.send(alice),
    ]

    fed_dataset = sy.FederatedDataset(datasets)

    assert fed_dataset.workers == ["bob", "alice"]
    assert len(fed_dataset) == 6

    fed_dataset["alice"].get()
    assert (fed_dataset["alice"].data == alice_base_dataset.data).all()
    assert fed_dataset["alice"][2] == (5, 5)
    assert len(fed_dataset["alice"]) == 4
    assert len(fed_dataset) == 6

    assert isinstance(fed_dataset.__str__(), str)
Example #17
def test_federated_dataloader_iter_per_worker(workers):
    bob = workers["bob"]
    alice = workers["alice"]
    james = workers["james"]
    datasets = [
        fl.BaseDataset(th.tensor([1, 2]), th.tensor([1, 2])).send(bob),
        fl.BaseDataset(th.tensor([3, 4, 5, 6]), th.tensor([3, 4, 5,
                                                           6])).send(alice),
        fl.BaseDataset(th.tensor([7, 8, 9, 10]), th.tensor([7, 8, 9,
                                                            10])).send(james),
    ]

    fed_dataset = sy.FederatedDataset(datasets)
    fdataloader = sy.FederatedDataLoader(fed_dataset,
                                         batch_size=2,
                                         iter_per_worker=True,
                                         shuffle=True)
    nr_workers = len(datasets)
    assert (fdataloader.num_iterators == nr_workers
            ), "num_iterators should be equal to the number of workers"
    for batch_idx, batches in enumerate(fdataloader):
        assert len(
            batches.keys()) == nr_workers, "return a batch for each worker"
Example #18
train_inputs = th.tensor(X_train, dtype=th.float)
train_labels = th.tensor(y_train)
test_inputs = th.tensor(X_test, dtype=th.float)
test_labels = th.tensor(y_test)

# Send the training and test data to the gateways in equal proportion.
# Since there are two gateways, we split the data into two halves:
train_idx = int(len(train_labels)/2)
test_idx = int(len(test_labels)/2)
gateway1_train_dataset = sy.BaseDataset(train_inputs[:train_idx], train_labels[:train_idx]).send(gateway1)
gateway2_train_dataset = sy.BaseDataset(train_inputs[train_idx:], train_labels[train_idx:]).send(gateway2)
gateway1_test_dataset = sy.BaseDataset(test_inputs[:test_idx], test_labels[:test_idx]).send(gateway1)
gateway2_test_dataset = sy.BaseDataset(test_inputs[test_idx:], test_labels[test_idx:]).send(gateway2)

# Create federated datasets, an extension of the PyTorch TensorDataset class
federated_train_dataset = sy.FederatedDataset([gateway1_train_dataset, gateway2_train_dataset])
federated_test_dataset = sy.FederatedDataset([gateway1_test_dataset, gateway2_test_dataset])

# Create federated dataloaders, an extension of the PyTorch DataLoader class
federated_train_loader = sy.FederatedDataLoader(federated_train_dataset, shuffle=True, batch_size=BATCH_SIZE)
federated_test_loader = sy.FederatedDataLoader(federated_test_dataset, shuffle=False, batch_size=BATCH_SIZE)

len(federated_train_loader)
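"""A small, hedged check (not part of the original notebook): each batch yielded by the FederatedDataLoader is a pair of pointer tensors located on one of the two gateways."""

# Peek at the first few batches to see which gateway holds them (illustrative only).
for batch_idx, (data, target) in enumerate(federated_train_loader):
    print(batch_idx, data.location.id, data.shape)
    if batch_idx == 2:
        break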

"""**Initialising Neural Network**"""

print(n_class)

import torch.nn as nn
nn.Dropout(0.5)
class Net(nn.Module):
Example #19
    return train_dataloaders, val_dataloaders


# In[13]:

# %%time
precision_fractional = 3

private_ds, central_val_dl = get_private_data_loaders(
    workers=workers,
    #                                                       crypto_provider=crypto_provider,
    #                                                       precision_fractional=precision_fractional,
)

print(len(private_ds), len(central_val_dl))
federated_train_ds = sy.FederatedDataset(private_ds)
federated_train_dl = sy.FederatedDataLoader(federated_train_ds,
                                            shuffle=False,
                                            batch_size=args.batch_size)

# In[ ]:

# w = workers[1]
# len(list(w._objects.keys()))
# x = w._objects[list(w._objects.keys())[0]]
# print(x)
# len(w.search(['#wesad', '#chest_device', '#target']))

# In[3]:

get_ipython().run_line_magic('autoreload', '')
def main():
    args = define_and_get_arguments()

    hook = sy.TorchHook(torch)

    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
    else:
        kwargs_websocket = {"host": "localhost", "hook": hook, "verbose": args.verbose}
        alice = WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket)
        bob = WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)

    workers = [alice, bob]

    use_cuda = args.cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    if DATATHON:
        bob_data, bob_target = preprocessed_data("EICU_DB", True, 0)
        alice_data, alice_target = preprocessed_data("MIMIC_DB", True, 1)
        alice_train_dataset = sy.BaseDataset(alice_data, alice_target).send(alice)
        bob_train_dataset = sy.BaseDataset(bob_data, bob_target).send(bob)

        federated_train_dataset = sy.FederatedDataset([alice_train_dataset, bob_train_dataset])

        federated_train_loader = sy.FederatedDataLoader(federated_train_dataset, shuffle=True, batch_size=args.batch_size, iter_per_worker=True, **kwargs)

        test_loader_mimic = get_dataloader(is_train=False, batch_size=args.batch_size, is_mimic=1)
        test_loader_eicu = get_dataloader(is_train=False, batch_size=args.batch_size, is_mimic=0)

    else:
        federated_train_loader = sy.FederatedDataLoader(
            datasets.MNIST(
                "../data",
                train=True,
                download=True,
                transform=transforms.Compose(
                    [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
                ),
            ).federate(tuple(workers)),
            batch_size=args.batch_size,
            shuffle=True,
            iter_per_worker=True,
            **kwargs,
        )

        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                "../data",
                train=False,
                transform=transforms.Compose(
                    [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
                ),
            ),
            batch_size=args.test_batch_size,
            shuffle=True,
            **kwargs,
        )

    model = Net().to(device)

    for epoch in range(1, args.epochs + 1):
        logger.info("Starting epoch %s/%s", epoch, args.epochs)
        model = train(model, device, federated_train_loader, args.lr, args.federate_after_n_batches)
        if DATATHON:
            test(model, device, test_loader_mimic, 1)
            test(model, device, test_loader_eicu, 0)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
res = emotion_analysis(custom[0])
res = res + ".mp3"
x = img1
x = x.reshape([48, 48])
plt.gray()
plt.imshow(x)
plt.show()
ipd.Audio(filename='/content/gdrive/My Drive/Songs/{}'.format(res),autoplay=True)

"""## Federated Learning"""

!pip install syft

import syft as sy  # <-- NEW: import the Pysyft library
hook = sy.TorchHook(torch)  # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning
bob = sy.VirtualWorker(hook, id="bob")  # <-- NEW: define remote worker bob
alice = sy.VirtualWorker(hook, id="alice")  # <-- NEW: and alice

size = len(train_loader)

bobs_data = sy.BaseDataset(torch.tensor(x_train[:size//2]).type(torch.LongTensor), torch.tensor(y_train[:size//2]).type(torch.LongTensor)).send(bob)
alices_data = sy.BaseDataset(torch.tensor(x_train[size//2:]).type(torch.LongTensor), torch.tensor(y_train[size//2:]).type(torch.LongTensor)).send(alice)
federated_train_dataset = sy.FederatedDataset([bobs_data, alices_data])
federated_train_loader = sy.FederatedDataLoader(federated_train_dataset, batch_size=batch_size)

test_dataset = torch.utils.data.TensorDataset(torch.tensor(x_test).type(torch.LongTensor), torch.tensor(y_test).type(torch.LongTensor))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size)
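"""A hedged sketch of one federated training epoch over federated_train_loader, assuming a `model` and an `optimizer` are defined elsewhere in the notebook (not shown in this excerpt); it follows the usual PySyft send/train/get pattern rather than the notebook's exact code, and the loss choice is illustrative."""

import torch.nn.functional as F

def train_federated_epoch(model, optimizer, federated_train_loader):
    model.train()
    for data, target in federated_train_loader:
        # Each batch is a pair of pointers living on bob or alice; move the model
        # to that worker, train remotely, then bring the model back.
        model.send(data.location)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        model.get()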



def run_train_test(workers, ports, ids, X_train, X_test, y_train, y_test,
                   encoder):
    # Hook PyTorch ie add extra functionalities to support Federated Learning
    hook = sy.TorchHook(torch)
    # Sets the seed for generating random numbers.
    torch.manual_seed(1)
    # Select CPU computation; to use the GPU, pass "cuda" instead
    device = torch.device("cpu")
    # Data will be distributed among these VirtualWorkers.
    # Remote training of the model will happen here.

    kwargs_websocket_bob = {"host": workers[1], "hook": hook}
    #gatway2 = sy.VirtualWorker(hook, id="gatway2")
    gatway2 = WebsocketClientWorker(id=ids[1],
                                    port=ports[1],
                                    **kwargs_websocket_bob)

    kwargs_websocket_alice = {"host": workers[0], "hook": hook}
    #gatway1 = sy.VirtualWorker(hook, id="gatway1")
    gatway1 = WebsocketClientWorker(id=ids[0],
                                    port=ports[0],
                                    **kwargs_websocket_alice)
    #kwargs_websocket_bob2 = {"host": workers[2], "hook": hook}
    #gatway2 = sy.VirtualWorker(hook, id="gatway2")
    #gatway3 = WebsocketClientWorker(id=ids[2], port=ports[2], **kwargs_websocket_bob2)

    #kwargs_websocket_alice2 = {"host": workers[3], "hook": hook}
    #gatway1 = sy.VirtualWorker(hook, id="gatway1")
    #gatway4= WebsocketClientWorker(id=ids[3], port=ports[3], **kwargs_websocket_alice2)
    # Number of times we want to iterate over whole training data
    #gatway3=sy.VirtualWorker(hook, id="gatway3")
    #gatway4=sy.VirtualWorker(hook, id="gatway4")
    BATCH_SIZE = 1000
    EPOCHS = 5
    LOG_INTERVAL = 5
    lr = 0.01

    n_feature = X_train.shape[1]
    n_class = np.unique(y_train).shape[0]

    print("Number of training features : ", n_feature)
    print("Number of training classes : ", n_class)
    # Create pytorch tensor from X_train,y_train,X_test,y_test
    train_inputs = torch.tensor(X_train,
                                dtype=torch.float).tag("#iot", "#network",
                                                       "#data", "#train")
    train_labels = torch.tensor(y_train).tag("#iot", "#network", "#target",
                                             "#train")
    test_inputs = torch.tensor(X_test,
                               dtype=torch.float).tag("#iot", "#network",
                                                      "#data", "#test")
    test_labels = torch.tensor(y_test).tag("#iot", "#network", "#target",
                                           "#test")

    # Send the training and test data to the gateways in equal proportion.
    train_idx = int(len(train_labels) / 2)
    test_idx = int(len(test_labels) / 2)
    gatway1_train_dataset = sy.BaseDataset(
        train_inputs[:train_idx], train_labels[:train_idx]).send(gatway1)
    gatway2_train_dataset = sy.BaseDataset(
        train_inputs[train_idx:2 * train_idx],
        train_labels[train_idx:2 * train_idx]).send(gatway2)
    #gatway3_train_dataset = sy.BaseDataset(train_inputs[2*train_idx:3*train_idx], train_labels[2*train_idx:3*train_idx]).send(gatway3)
    #gatway4_train_dataset = sy.BaseDataset(train_inputs[3*train_idx:], train_labels[3*train_idx:]).send(gatway4)
    gatway1_test_dataset = sy.BaseDataset(test_inputs[:test_idx],
                                          test_labels[:test_idx]).send(gatway1)
    gatway2_test_dataset = sy.BaseDataset(test_inputs[test_idx:2 * test_idx],
                                          test_labels[test_idx:2 *
                                                      test_idx]).send(gatway2)
    #gatway3_test_dataset = sy.BaseDataset(test_inputs[2*test_idx:2*test_idx], test_labels[2*test_idx:3*test_idx]).send(gatway3)
    #gatway4_test_dataset = sy.BaseDataset(test_inputs[3*test_idx:], test_labels[3*test_idx:]).send(gatway3)

    # Create federated datasets, an extension of the PyTorch TensorDataset class
    federated_train_dataset = sy.FederatedDataset([
        gatway1_train_dataset, gatway2_train_dataset
    ])  #,gatway3_train_dataset,gatway4_train_dataset])
    federated_test_dataset = sy.FederatedDataset(
        [gatway1_test_dataset,
         gatway2_test_dataset])  #,gatway3_test_dataset,gatway4_test_dataset])

    # Create federated dataloaders, an extension of the PyTorch DataLoader class
    federated_train_loader = sy.FederatedDataLoader(federated_train_dataset,
                                                    shuffle=True,
                                                    batch_size=BATCH_SIZE)
    federated_test_loader = sy.FederatedDataLoader(federated_test_dataset,
                                                   shuffle=False,
                                                   batch_size=BATCH_SIZE)
    # Initialize the model
    model = Net(n_feature, n_class)

    #Initialize the SGD optimizer
    optimizer = optim.SGD(model.parameters(), lr=lr)

    for epoch in range(1, EPOCHS + 1):
        # Train on the training data in a federated way
        start1 = datetime.datetime.now()
        model = train(model, device, federated_train_loader, optimizer, epoch,
                      BATCH_SIZE, LOG_INTERVAL)
        end1 = datetime.datetime.now()
        print("Time Taken to train epoch %d is ", end1 - start1)
        if epoch == 1:
            total = end1 - start1
        else:
            total += end1 - start1
        # Check the test accuracy on unseen test data in a federated way
        start2 = datetime.datetime.now()
        test(model, device, federated_test_loader)
        end2 = datetime.datetime.now()
        print("Time Taken to test epoch %d is ", end2 - start2)
    print("Total training time = ", total)

    # Save the model
    torch.save(model.state_dict(), "binaize-threat-model_10.pt")
    # Reload the model in a new model object
    model_new = Net(n_feature, n_class)
    model_new.load_state_dict(torch.load("binaize-threat-model_10.pt"))
    model_new.eval()
    process = os.popen(
        "sudo scp -i /root/.ssh/id_rsa -o stricthostkeychecking=no binaize-threat-model_fully_trained.pt root@%s:/home/ayush/ADS/predict_workers"
        % (workers[0]))
    output = process.read()
    process = os.popen(
        "sudo scp -i /root/.ssh/id_rsa -o stricthostkeychecking=no binaize-threat-model_fully_trained.pt root@%s:/home/ayush/ADS/predict_workers"
        % (workers[1]))
    output = process.read()
    # Take the 122nd record from the test data
    idx = 122
    data = test_inputs[idx]
    pred = model_new(data)
    pred_label = int(pred.argmax().data.cpu().numpy())
    pred_threat = encoder.inverse_transform([pred_label])[0]
    print("Predicted threat type : ", pred_threat)
    actual_label = int(test_labels[idx].data.cpu().numpy())
    actual_threat = encoder.inverse_transform([actual_label])[0]
    print("Actual threat type : ", actual_threat)

    # Take the 159th record from the test data
    idx = 159
    data = test_inputs[idx]
    pred = model_new(data)
    pred_label = int(pred.argmax().data.cpu().numpy())
    pred_threat = encoder.inverse_transform([pred_label])[0]
    print("Predicted threat type : ", pred_threat)
    actual_label = int(test_labels[idx].data.cpu().numpy())
    actual_threat = encoder.inverse_transform([actual_label])[0]
    print("Actual threat type : ", actual_threat)
"""We load a model and an optimizer"""

n_features = boston_data['alice'][0].shape[1]
n_targets = 1

model = th.nn.Linear(n_features, n_targets).to(device)
"""Here we cast the data fetched in a `FederatedDataset`. See the workers which hold part of the data."""
print("A total of {} workers".format(len(boston_data.keys())))
# Cast the result in BaseDatasets
datasets = []
for worker in boston_data.keys():
    dataset = sy.BaseDataset(boston_data[worker][0], boston_target[worker][0])
    datasets.append(dataset)

# Build the FederatedDataset object
dataset = sy.FederatedDataset(datasets)
print(dataset.workers)
optimizers = {}
for worker in dataset.workers:
    optimizers[worker] = th.optim.Adam(params=model.parameters(), lr=1e-2)
    # optimizers[worker] = th.optim.SGD(model.parameters(), lr=0.05)
"""We put it in a `FederatedDataLoader` and specify options"""

train_loader = sy.FederatedDataLoader(dataset,
                                      batch_size=32,
                                      shuffle=False,
                                      drop_last=False)
"""And finally we iterate over epochs. You can see how similar this is compared to pure and local PyTorch training!"""
epochs = 5

from collections import defaultdict
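"""A hedged sketch of the training loop the cell above refers to, reusing the model, the per-worker optimizers dict and the train_loader built earlier; the per-worker loss bookkeeping with defaultdict is an assumption, not the original notebook code."""

for epoch in range(1, epochs + 1):
    epoch_loss = defaultdict(float)
    for data, target in train_loader:
        worker_id = data.location.id
        optimizer = optimizers[worker_id]

        # Train remotely on the worker that holds this batch, then retrieve the model.
        model.send(data.location)
        optimizer.zero_grad()
        pred = model(data)
        loss = ((pred.view(-1) - target) ** 2).mean()
        loss.backward()
        optimizer.step()
        model.get()

        epoch_loss[worker_id] += loss.get().item()

    print("Epoch {}: ".format(epoch) +
          ", ".join("{}: {:.3f}".format(w, l) for w, l in epoch_loss.items()))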