Example #1
def test_get_unknown_worker():

    hook = sy.TorchHook(torch)

    bob = VirtualWorker(hook, id="bob")
    charlie = VirtualWorker(hook, id="charlie")

    # if an unknown string or id representing a worker is given, it fails
    with pytest.raises(WorkerNotFoundException):
        bob.get_worker("the_unknown_worker", fail_hard=True)

    with pytest.raises(WorkerNotFoundException):
        bob.get_worker(1, fail_hard=True)

    # if an instance of VirtualWorker is given, it doesn't fail
    assert bob.get_worker(charlie).id == charlie.id
    assert charlie.id in bob._known_workers
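The test snippets on this page omit their imports. A header along the following lines is enough to run them; the exact module paths are an assumption and have moved between PySyft releases:

import random

import pytest
import torch

import syft as sy
from syft.exceptions import WorkerNotFoundException
from syft.workers.virtual import VirtualWorker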
Example #2
    def test_get_unknown_worker(self):

        hook = sy.TorchHook(torch)

        bob = VirtualWorker(hook, id="bob")
        charlie = VirtualWorker(hook, id="charlie")

        # if an unknown string or id representing a worker is given, it fails
        try:
            bob.get_worker("the_unknown_worker", fail_hard=True)
            assert False
        except WorkerNotFoundException:
            assert True

        try:
            bob.get_worker(1, fail_hard=True)
            assert False
        except WorkerNotFoundException:
            assert True

        # if an instance of VirtualWorker is given, it doesn't fail
        assert bob.get_worker(charlie).id == charlie.id
        assert charlie.id in bob._known_workers
Example #3
def test_search():
    bob = VirtualWorker(sy.torch.hook)

    x = (torch.tensor([1, 2, 3, 4, 5]).tag("#fun", "#mnist").describe(
        "The images in the MNIST training dataset.").send(bob))

    y = (torch.tensor([1, 2, 3, 4, 5]).tag("#not_fun", "#cifar").describe(
        "The images in the MNIST training dataset.").send(bob))

    z = (torch.tensor([1, 2, 3, 4, 5]).tag("#fun", "#boston_housing").describe(
        "The images in the MNIST training dataset.").send(bob))

    a = (torch.tensor([1, 2, 3, 4, 5]).tag(
        "#not_fun", "#boston_housing").describe(
            "The images in the MNIST training dataset.").send(bob))

    assert len(bob.search("#fun")) == 2
    assert len(bob.search("#mnist")) == 1
    assert len(bob.search("#cifar")) == 1
    assert len(bob.search("#not_fun")) == 2
    assert len(bob.search("#not_fun", "#boston_housing")) == 1
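As the assertions show, a multi-tag query is an AND filter: an object is returned only if it carries every tag in the query. A minimal standalone sketch of that behaviour (assuming the top-level sy.VirtualWorker alias available in the PySyft 0.2.x line):

import torch
import syft as sy

hook = sy.TorchHook(torch)
bob = sy.VirtualWorker(hook, id="bob_search_demo")

x = torch.tensor([1, 2, 3]).tag("#fun", "#mnist").describe("toy data").send(bob)

assert len(bob.search("#fun")) == 1            # single tag matches
assert len(bob.search("#fun", "#mnist")) == 1  # all query tags are present
assert len(bob.search("#fun", "#cifar")) == 0  # "#cifar" was never attached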
Example #4
def create_sandbox(gbs, verbose=True, download_data=True):
    """Create the boilerplate that most people who are just playing
    around would like to have: a hook, a set of virtual workers, and
    (optionally) a few example datasets distributed across them."""

    try:
        torch = gbs["torch"]
    except KeyError:
        torch = gbs["th"]

    global hook
    global bob
    global theo
    global alice
    global andy
    global jason
    global jon

    if download_data:  # pragma: no cover
        from sklearn.datasets import load_boston
        from sklearn.datasets import load_breast_cancer
        from sklearn.datasets import load_digits
        from sklearn.datasets import load_diabetes
        from sklearn.datasets import load_iris
        from sklearn.datasets import load_wine
        from sklearn.datasets import load_linnerud

        def load_sklearn(func, *tags):
            dataset = func()
            data = (torch.tensor(dataset["data"]).float().tag(*(
                list(tags) + ["#data"] +
                dataset["DESCR"].split("\n")[0].lower().split(" "))).describe(
                    dataset["DESCR"]))
            target = (torch.tensor(dataset["target"]).float().tag(*(
                list(tags) + ["#target"] +
                dataset["DESCR"].split("\n")[0].lower().split(" "))).describe(
                    dataset["DESCR"]))

            return data, target

        def distribute_dataset(data, workers):
            batch_size = int(data.shape[0] / len(workers))
            n_batches = len(workers)
            for batch_i in range(n_batches - 1):
                batch = data[batch_i * batch_size:(batch_i + 1) * batch_size]
                batch.tags = data.tags
                batch.description = data.description
                ptr = batch.send(workers[batch_i])
                ptr.child.garbage_collect_data = False

            batch = data[(n_batches - 1) * batch_size:]
            batch.tags = data.tags
            batch.description = data.description
            ptr = batch.send(workers[n_batches - 1])
            ptr.child.garbage_collect_data = False

    print("Setting up Sandbox...")

    if verbose:
        print("\t- Hooking PyTorch")
    hook = TorchHook(torch)

    if verbose:
        print("\t- Creating Virtual Workers:")
        print("\t\t- bob")
    bob = VirtualWorker(hook, id="bob")
    if verbose:
        print("\t\t- theo")
    theo = VirtualWorker(hook, id="theo")
    if verbose:
        print("\t\t- jason")
    jason = VirtualWorker(hook, id="jason")
    if verbose:
        print("\t\t- alice")
    alice = VirtualWorker(hook, id="alice")
    if verbose:
        print("\t\t- andy")
    andy = VirtualWorker(hook, id="andy")
    if verbose:
        print("\t\t- jon")
    jon = VirtualWorker(hook, id="jon")

    if verbose:
        print("\tStoring hook and workers as global variables...")
    gbs["hook"] = hook
    gbs["bob"] = bob
    gbs["theo"] = theo
    gbs["jason"] = jason
    gbs["alice"] = alice
    gbs["andy"] = andy
    gbs["jon"] = jon

    gbs["workers"] = [bob, theo, jason, alice, andy, jon]

    if download_data:  # pragma: no cover

        if verbose:
            print("\tLoading datasets from SciKit Learn...")
            print("\t\t- Boston Housing Dataset")
        boston = load_sklearn(load_boston,
                              *["#boston", "#housing", "#boston_housing"])
        if verbose:
            print("\t\t- Diabetes Dataset")
        diabetes = load_sklearn(load_diabetes, *["#diabetes"])
        if verbose:
            print("\t\t- Breast Cancer Dataset")
        breast_cancer = load_sklearn(load_breast_cancer)
        if verbose:
            print("\t\t- Digits Dataset")
        digits = load_sklearn(load_digits)
        if verbose:
            print("\t\t- Iris Dataset")
        iris = load_sklearn(load_iris)
        if verbose:
            print("\t\t- Wine Dataset")
        wine = load_sklearn(load_wine)
        if verbose:
            print("\t\t- Linnerud Dataset")
        linnerud = load_sklearn(load_linnerud)

        workers = [bob, theo, jason, alice, andy, jon]

        if verbose:
            print("\tDistributing Datasets Amongst Workers...")
        distribute_dataset(boston[0], workers)
        distribute_dataset(boston[1], workers)
        distribute_dataset(diabetes[0], workers)
        distribute_dataset(diabetes[1], workers)
        distribute_dataset(breast_cancer[0], workers)
        distribute_dataset(breast_cancer[1], workers)
        distribute_dataset(digits[0], workers)
        distribute_dataset(digits[1], workers)
        distribute_dataset(iris[0], workers)
        distribute_dataset(iris[1], workers)
        distribute_dataset(wine[0], workers)
        distribute_dataset(wine[1], workers)
        distribute_dataset(linnerud[0], workers)
        distribute_dataset(linnerud[1], workers)

    if verbose:
        print("\tCollecting workers into a VirtualGrid...")
    _grid = VirtualGrid(*gbs["workers"])
    gbs["grid"] = _grid

    print("Done!")
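In the PySyft 0.2.x releases this helper is exposed as sy.create_sandbox. A typical interactive session, with download_data left at its default, might look like the sketch below (the exact return shape of grid.search differs between releases):

import torch
import syft as sy

sy.create_sandbox(globals())  # defines hook, bob, theo, jason, alice, andy, jon, workers, grid

# the sandbox datasets are tagged, so they can be found again through the grid
boston_data = grid.search("#boston", "#data")
boston_target = grid.search("#boston", "#target")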
Example #5
def main():
    args = define_and_get_arguments()
    print(args)
    hook = sy.TorchHook(torch)

    host = "localhost"

    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
        charlie = VirtualWorker(id="charlie", hook=hook, verbose=args.verbose)
    else:
        kwargs_websocket = {
            "host": host,
            "hook": hook,
            "verbose": args.verbose
        }
        alice = WebsocketClientWorker(id="alice",
                                      port=8777,
                                      **kwargs_websocket)
        bob = WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
        charlie = WebsocketClientWorker(id="charlie",
                                        port=8779,
                                        **kwargs_websocket)

    workers = [alice, bob, charlie]

    use_cuda = args.cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    # Search multiple times should still work
    tr_alice = alice.search("#mnist", "#alice", "#train_tag")
    tr_bob = bob.search("#mnist", "#bob", "#train_tag")
    tr_charlie = charlie.search("#mnist", "#charlie", "#train_tag")

    base_data = []
    base_data.append(BaseDataset(tr_alice[0], tr_alice[1]))
    base_data.append(BaseDataset(tr_bob[0], tr_bob[1]))
    base_data.append(BaseDataset(tr_charlie[0], tr_charlie[1]))

    federated_train_loader = sy.FederatedDataLoader(
        FederatedDataset(base_data),
        batch_size=args.batch_size,
        shuffle=True,
        iter_per_worker=True,
        **kwargs,
    )

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs,
    )

    model = Net().to(device)

    for epoch in range(1, args.epochs + 1):
        logger.info("Starting epoch %s/%s", epoch, args.epochs)
        model = train(model, device, federated_train_loader, args.lr,
                      args.federate_after_n_batches)
        test(model, device, test_loader)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
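When use_virtual is off, the script above expects three WebsocketServerWorker processes already listening on ports 8777-8779 and holding the tagged "#mnist" training data. A minimal server script per worker might look like this sketch (module path and constructor as in PySyft 0.2.x; adjust to your release):

import torch
import syft as sy
from syft.workers.websocket_server import WebsocketServerWorker

hook = sy.TorchHook(torch)

# run one process per worker: repeat with id="bob"/port=8778 and id="charlie"/port=8779
server = WebsocketServerWorker(hook=hook, host="localhost", port=8777, id="alice", verbose=True)
server.start()  # blocks and serves requests coming from the WebsocketClientWorker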
Example #6
def main():
    args = Arguments(False)

    hook = sy.TorchHook(torch)

    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
        charlie = VirtualWorker(id="charlie", hook=hook, verbose=args.verbose)
    else:
        kwargs_websocket = {
            "host": "localhost",
            "hook": hook,
            "verbose": args.verbose
        }
        alice = WebsocketClientWorker(id="alice",
                                      port=8777,
                                      **kwargs_websocket)
        bob = WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
        charlie = WebsocketClientWorker(id="charlie",
                                        port=8779,
                                        **kwargs_websocket)

    workers = [alice, bob, charlie]
    clients_mem = torch.zeros(len(workers))

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if use_cuda:
        # TODO Quickhack. Actually need to fix the problem of moving the model to CUDA
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")
    print(device)

    kwargs = {"num_workers": 0, "pin_memory": False} if use_cuda else {}

    federated_train_loader = sy.FederatedDataLoader(
        datasets.MNIST(
            "../data",
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ).federate(tuple(workers)),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs,
    )

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs,
    )

    start = time.time()
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, federated_train_loader, optimizer, epoch,
              clients_mem)
        test(args, model, device, test_loader)
        t = time.time()
        print(t - start)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")

    end = time.time()
    print(end - start)
    print("Memory exchanged : ", clients_mem)
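Examples #6 and #10 construct an Arguments object that is not shown on this page. A hypothetical stand-in exposing the fields those scripts actually read could look like this (all default values are placeholder assumptions):

class Arguments:
    """Hypothetical stand-in for the Arguments object used in Examples #6 and #10."""

    def __init__(self, no_cuda):
        self.batch_size = 64
        self.test_batch_size = 1000
        self.epochs = 10
        self.lr = 0.01
        self.seed = 1
        self.no_cuda = no_cuda     # disable CUDA even if it is available
        self.use_virtual = True    # use VirtualWorker instead of websocket workers
        self.verbose = False
        self.save_model = False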
Example #7
def test___init__():
    hook = sy.TorchHook(torch)

    tensor = torch.tensor([1, 2, 3, 4])

    worker_id = int(10e10 * random.random())
    alice_id = f"alice{worker_id}"
    alice = VirtualWorker(hook, id=alice_id)
    worker_id = int(10e10 * random.random())
    bob = VirtualWorker(hook, id=f"bob{worker_id}")
    worker_id = int(10e10 * random.random())
    charlie = VirtualWorker(hook, id=f"charlie{worker_id}")
    worker_id = int(10e10 * random.random())
    dawson = VirtualWorker(hook, id=f"dawson{worker_id}", data=[tensor])

    # Ensure that the "add data on worker creation" functionality works as expected
    assert tensor.owner == dawson

    assert bob.get_worker(alice_id).id == alice.id
    assert bob.get_worker(alice).id == alice.id
    assert bob.get_worker(charlie).id == charlie.id

    bob.get_worker("the_unknown_worker")

    bob.add_worker(alice)
Example #8
def test___init__():
    hook = sy.TorchHook(torch)

    tensor = torch.tensor([1, 2, 3, 4])

    alice = VirtualWorker(hook, id="alice")
    bob = VirtualWorker(hook, id="bob")
    charlie = VirtualWorker(hook, id="charlie")
    dawson = VirtualWorker(hook, id="dawson", data=[tensor])

    # Ensure that the "add data on worker creation" functionality works as expected
    assert tensor.owner == dawson

    assert bob.get_worker("alice").id == alice.id
    assert bob.get_worker(alice).id == alice.id
    assert bob.get_worker(charlie).id == charlie.id

    bob.get_worker("the_unknown_worker")

    bob.add_worker(alice)
Example #9
def main():
    args = define_and_get_arguments()

    hook = sy.TorchHook(torch)

    host = "localhost"

    if args.use_virtual:
        hospital_a = VirtualWorker(id="hospital_a", hook=hook, verbose=args.verbose)
        hospital_b = VirtualWorker(id="hospital_b", hook=hook, verbose=args.verbose)
        hospital_c = VirtualWorker(id="hospital_c",
                                   hook=hook,
                                   verbose=args.verbose)
    else:
        kwargs_websocket = {
            "host": host,
            "hook": hook,
            "verbose": args.verbose
        }
        hospital_a = WebsocketClientWorker(id="hospital_a",
                                           port=8777,
                                           **kwargs_websocket)
        hospital_b = WebsocketClientWorker(id="hospital_b",
                                           port=8778,
                                           **kwargs_websocket)
        hospital_c = WebsocketClientWorker(id="hospital_c",
                                           port=8779,
                                           **kwargs_websocket)

        print()
        print(
            "*******************************************************************************************************"
        )
        print("building training channels ...")
        print(" #hospital_a, remote worker reference: ", hospital_a)
        print(" #hospital_b, remote worker reference: ", hospital_b)
        print(" #hospital_c, remote worker reference: ", hospital_c)
        print()

    workers = [hospital_a, hospital_b, hospital_c]

    use_cuda = args.cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    # Search multiple times should still work
    tr_hospital_a = hospital_a.search("#chest_xray", "#hospital_a",
                                      "#train_tag")
    tr_hospital_b = hospital_b.search("#chest_xray", "#hospital_b",
                                      "#train_tag")
    tr_hospital_c = hospital_c.search("#chest_xray", "#hospital_c",
                                      "#train_tag")

    base_data = []
    base_data.append(BaseDataset(tr_hospital_a[0], tr_hospital_a[1]))
    base_data.append(BaseDataset(tr_hospital_b[0], tr_hospital_b[1]))
    base_data.append(BaseDataset(tr_hospital_c[0], tr_hospital_c[1]))

    federated_train_loader = sy.FederatedDataLoader(
        FederatedDataset(base_data),
        batch_size=args.batch_size,
        shuffle=True,
        iter_per_worker=True,
        **kwargs,
    )

    data_transforms = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.RandomRotation(20),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    test = datasets.ImageFolder('chest_xray/small', data_transforms)

    local_test_loader = torch.utils.data.DataLoader(
        test, batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = resnet.resnet18_simple()
    # print("*******************************************************************************************************")
    # print("model architecture")
    # print(model)
    # print()

    print(
        "*******************************************************************************************************"
    )
    print("starting federated learning ...")
    for epoch in range(1, args.epochs + 1):
        logger.info(" starting fl training epoch %s/%s", epoch, args.epochs)
        model = fl_train(model, device, federated_train_loader, args.lr,
                         args.federate_after_n_batches)

        logger.info(" starting local inference")
        local_test(model, device, local_test_loader)

    if args.save_model:
        torch.save(model.state_dict(), "./log/chest_xray_resnet18.pt")
Example #10
def experiment(no_cuda):

    # Create the clients (either virtual workers or websocket client workers)

    hook = sy.TorchHook(torch)



    # Initializing arguments, with GPU usage or not
    args = Arguments(no_cuda)

    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
        charlie = VirtualWorker(id="charlie", hook=hook, verbose=args.verbose)
    else:
        kwargs_websocket = {"host": "localhost", "hook": hook, "verbose": args.verbose}
        alice = WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket)
        bob = WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
        charlie = WebsocketClientWorker(id="charlie", port=8779, **kwargs_websocket)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if use_cuda:
        # TODO Quickhack. Actually need to fix the problem of moving the model to CUDA
        torch.set_default_tensor_type(torch.cuda.FloatTensor)

    torch.manual_seed(args.seed)

    clients = [alice, bob, charlie]
    clients_mem = torch.zeros(len(clients))

    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 0, 'pin_memory': False} if use_cuda else {}


    # Federated data loader
    federated_train_loader = sy.FederatedDataLoader( # <-- this is now a FederatedDataLoader
      datasets.MNIST('../data', train=True, download=True,
                     transform=transforms.Compose([
                         transforms.ToTensor(),
                         transforms.Normalize((0.1307,), (0.3081,))
                     ]))
      .federate(clients), # <-- NEW: we distribute the dataset across all the workers, it's now a FederatedDataset
      batch_size=args.batch_size, shuffle=True, **kwargs)

    test_loader = torch.utils.data.DataLoader(
      datasets.MNIST('../data', train=False, transform=transforms.Compose([
                         transforms.ToTensor(),
                         transforms.Normalize((0.1307,), (0.3081,))
                     ])),
      batch_size=args.test_batch_size, shuffle=True, **kwargs)


    # create one client-side model (Net1) and optimizer per client
    models, optimizers = [], []
    for i in range(len(clients)):
        models.append(Net1().to(device))
        models[i] = models[i].send(clients[i])  # the model segment now lives on that client
        optimizers.append(optim.SGD(params=models[i].parameters(), lr=0.1))



    start = time.time()
    model = Net2().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr) # TODO momentum is not supported at the moment

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, federated_train_loader, optimizer, epoch,
              models, optimizers, clients_mem)
        test(args, model, device, test_loader, models)
        t = time.time()
        print(t - start)
    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")

    end = time.time()
    print(end - start)
    print("Memory exchanged : ", clients_mem)
    return clients_mem
Example #11
def test___init__():
    hook = sy.TorchHook(torch)

    alice = VirtualWorker(hook, id="alice")
    bob = VirtualWorker(hook, id="bob", known_workers={alice.id: alice})
    charlie = VirtualWorker(hook, id="charlie")

    assert bob.get_worker("alice").id == alice.id
    assert bob.get_worker(alice).id == alice.id
    assert bob.get_worker(charlie).id == charlie.id

    bob.get_worker("the_unknown_worker")

    bob.add_worker(alice)