Esempio n. 1
0
def wroker_config(args):
    """Build the list of PySyft workers selected by *args*.

    Depending on the flags this returns three in-process VirtualWorkers,
    three WebsocketClientWorkers on localhost, or four
    WebsocketClientWorkers pointing at remote Raspberry Pi hosts.
    """
    hook = sy.TorchHook(torch)

    logger.info("Worker setup.\n")
    if args.use_virtual:
        # In-process simulated workers.
        workers = [
            VirtualWorker(id=name, hook=hook, verbose=args.verbose)
            for name in ("alice", "bob", "charlie")
        ]
    elif args.localworkers:
        # Websocket workers running on this machine, one port each.
        common_kwargs = {
            "host": "localhost",
            "hook": hook,
            "verbose": args.verbose,
        }
        workers = [
            WebsocketClientWorker(id=name, port=port, **common_kwargs)
            for name, port in (("alice", 8777),
                               ("bob", 8778),
                               ("charlie", 8779))
        ]
    else:
        # Remote Raspberry Pi workers, all listening on port 8777.
        pi_hosts = {
            "Pi4_R1_1": "128.226.77.157",
            "Pi4_R1_2": "128.226.78.128",
            "Pi4_R1_3": "128.226.88.155",
            "Pi4_R1_4": "128.226.79.31",
        }
        workers = [
            WebsocketClientWorker(id=worker_id,
                                  port=8777,
                                  host=host,
                                  hook=hook)
            for worker_id, host in pi_hosts.items()
        ]

    return workers
Esempio n. 2
0
async def handle_worker(websocket,
                        path,
                        hook,
                        workers,
                        update_queue,
                        results_box,
                        model,
                        optimizer,
                        learning_rate,
                        verbose=False):
    """Handle one control message received from a worker over *websocket*.

    Reads a single JSON message and dispatches on its 'action' field:
    'get_available_port' hands out the next free syft port,
    'register' creates a WebsocketClientWorker proxy plus a per-worker
    Adam optimizer, 'request_model' queues the worker for a model update,
    and 'ping_conductor' reports queue/peer status (including any result
    waiting in *results_box*).
    """
    # Next syft port to hand out; shared across all connections.
    global syft_port_next
    message = json.loads(await websocket.recv())
    action = message['action']

    if action == 'get_available_port':
        log('Sending current available syft port %d' % syft_port_next, verbose)
        message = json.dumps({'success': True, 'syft_port': syft_port_next})
        await websocket.send(message)
        # Reserve the port just announced so the next caller gets a new one.
        syft_port_next += 1

    if action == 'register':
        name, host, syft_port = message['name'], message['host'], message[
            'syft_port']
        # is_client_worker=False: the proxy forwards messages to the remote
        # worker rather than executing them locally.
        new_worker = WebsocketClientWorker(host=host,
                                           hook=hook,
                                           id=name,
                                           port=syft_port,
                                           log_msgs=True,
                                           verbose=verbose,
                                           is_client_worker=False)
        workers[name] = new_worker
        # One optimizer per registered worker, keyed by the worker object.
        optimizer[workers[name]] = torch.optim.Adam(model.parameters(),
                                                    lr=learning_rate)
        message = json.dumps({'success': True, 'num_peers': len(workers)})
        log('Worker %s (%s) registered, port %d' % (name, host, syft_port),
            verbose)
        await websocket.send(message)

    elif action == 'request_model':
        name = message['name']
        log('Put %s into update queue' % name, verbose)
        update_queue.put(name)
        message = json.dumps({'success': True})
        await websocket.send(message)

    elif action == 'ping_conductor':
        name = message['name']

        # NOTE(review): `name in update_queue` requires update_queue to
        # support membership tests (a list/deque-like type, not a plain
        # queue.Queue) -- confirm against the caller.
        result = {'in_queue': name in update_queue, 'num_peers': len(workers)}
        #log('conductor pinged: send back %s %s' % (name, 'True' if result['in_queue'] else 'False'), verbose)
        if not result['in_queue'] and name in results_box:
            result['user'] = results_box[name]
        message = json.dumps(result, cls=NumpyArrayEncoder)
        await websocket.send(message)
Esempio n. 3
0
def connect_to_nodes(nodes):
    """Connect to the websocket server workers h2..h<nodes>.

    Node i is expected at 10.0.0.i:8778; returns the list of
    WebsocketClientWorker proxies, one per node.
    """
    hook = syft.TorchHook(torch)
    clients = []
    for node_num in range(2, int(nodes) + 1):
        conn_kwargs = {
            "host": '10.0.0.{}'.format(str(node_num)),
            "hook": hook,
            "verbose": True,
        }
        clients.append(
            WebsocketClientWorker(id='h{}'.format(str(node_num)),
                                  port=8778,
                                  **conn_kwargs))
    return clients
Esempio n. 4
0
async def fit_model_on_worker(worker: WebsocketClientWorker,
                              traced_model: torch.jit.ScriptModule,
                              batch_size: int, curr_round: int, lr: float,
                              no_federated_epochs: int):
    """Run one round of federated training of *traced_model* on *worker*.

    Sends a TrainConfig, wraps the remote fit between custom start/stop
    monitoring commands, and returns a 4-tuple of
    (worker id, updated model, final loss, deserialized network info).
    """
    train_config = syft.TrainConfig(
        model=traced_model,
        loss_fn=loss_fn,
        batch_size=batch_size,
        shuffle=True,
        epochs=no_federated_epochs,
        optimizer="SGD",
        optimizer_args={"lr": lr},
    )

    # Send monitoring command ("start_monitoring" must be exposed on the
    # remote worker; command_owner="self" targets the worker itself).
    message = worker.create_message_execute_command(
        command_name="start_monitoring", command_owner="self")
    serialized_message = syft.serde.serialize(message)
    worker._recv_msg(serialized_message)
    # Send the training config
    train_config.send(worker)
    # Call async fit on worker, which runs the worker-side fit method on
    # the dataset registered under the "targeted" key.
    print("Training round {}, calling fit on worker: {}".format(
        curr_round, worker.id))
    loss = await worker.async_fit(dataset_key="targeted", return_ids=[0])
    print("Training round: {}, worker: {}, avg_loss: {}".format(
        curr_round, worker.id, loss.item()))
    # Fetch the updated model back from the worker.
    model = train_config.get_model().obj
    # Stop monitoring command; the reply carries the monitoring data.
    message = worker.create_message_execute_command(
        command_name="stop_monitoring", command_owner="self")
    serialized_message = syft.serde.serialize(message)
    network_info = worker._recv_msg(serialized_message)

    # Deserialize the response received
    network_info = syft.serde.deserialize(network_info)
    return worker.id, model, loss, network_info
Esempio n. 5
0
def instance(args, kwargs_websocket):
    """Instantiate the websocket client workers.

    Args:
        args: parsed arguments; uses ``args.clients`` (number of workers)
            and ``args.client_port`` (base port -- worker ``i`` connects to
            ``client_port + i``).
        kwargs_websocket: dict supplying at least the ``host`` and ``hook``
            keyword arguments for WebsocketClientWorker.

    Returns:
        list: the WebsocketClientWorker instances, ids "0".."clients-1".
    """
    # Build the workers directly; the original made a redundant copy of
    # the list via a pass-through comprehension.
    worker_instances = [
        WebsocketClientWorker(id=str(i),
                              port=args.client_port + i,
                              **kwargs_websocket)
        for i in range(args.clients)
    ]
    for worker in worker_instances:
        print("Client: ", worker)
    return worker_instances
Esempio n. 6
0
def test_create_already_existing_worker_with_different_type(hook, start_proc):
    """A worker id must not be re-registered as a different worker type.

    Registers "bob" as a VirtualWorker (by sharing a tensor with it), then
    expects creating a WebsocketClientWorker with the same id to raise
    RuntimeError.
    """
    # Shares tensor with bob
    bob = sy.VirtualWorker(hook, "bob")
    _ = th.tensor([1, 2, 3]).send(bob)

    kwargs = {"id": "fed1", "host": "localhost", "port": 8765, "hook": hook}
    server = start_proc(WebsocketServerWorker, **kwargs)

    # Give the server process a moment to bind its port.
    time.sleep(0.1)

    # Recreates bob as a different type of worker
    kwargs = {"id": "bob", "host": "localhost", "port": 8765, "hook": hook}
    with pytest.raises(RuntimeError):
        bob = WebsocketClientWorker(**kwargs)

    server.terminate()
def main():
    """Send the module-level `data` to a websocket worker, then train locally.

    NOTE(review): relies on module-level names `data`, `nn` and `train`
    defined elsewhere in this file -- confirm they are in scope.
    """
    hook = sy.TorchHook(torch)
    # NOTE(review): id is an int here (1), unlike the string ids used by
    # the other examples -- confirm the server registers the same id.
    squad = WebsocketClientWorker(id=1,
                                  host='localhost',
                                  port=7171,
                                  hook=hook,
                                  verbose=True)

    message = data
    message_ptr = message.send(squad)
    print(message_ptr)
    print('sent model data. now squad has %d objects' %
          (message_ptr.location.objects_count_remote()))

    # get squad updated model
    model = nn.Linear(1, 1)
    train(model)
Esempio n. 8
0
def instantiate_websocket_client_worker(max_tries=5, sleep_time=0.1, **kwargs):  # pragma: no cover
    """Helper function to instantiate the websocket client.

    If a connection is refused, we wait a bit (`sleep_time` seconds) and try
    again.  After `max_tries` failed tries, a ConnectionRefusedError is
    raised.
    """
    retries_left = max_tries
    while True:
        try:
            return WebsocketClientWorker(**kwargs)
        except ConnectionRefusedError:
            if retries_left <= 0:
                # Out of retries: let the last error propagate.
                raise
            retries_left -= 1
            time.sleep(sleep_time)
Esempio n. 9
0
import torch
import syft
from syft.workers.websocket_server import WebsocketServerWorker
from syft.workers.websocket_client import WebsocketClientWorker

hook = syft.TorchHook(torch)

#local_worker = WebsocketServerWorker(host='localhost', hook=hook, id=0, port=8182, log_msgs=True, verbose=True)

#hook = syft.TorchHook(torch, local_worker=local_worker)

# Proxy for a websocket server worker expected to listen on localhost:8182.
remote_client = WebsocketClientWorker(host='localhost',
                                      hook=hook,
                                      id=2,
                                      port=8182,
                                      log_msgs=True,
                                      verbose=True)

# Make the remote worker known to the local (hook-created) worker.
hook.local_worker.add_worker(remote_client)

# Send two tensors to the remote worker.  The returned pointers are
# discarded here, so the tensors are only reachable via the remote store.
y = torch.tensor([1, 2, 3, 2, 4])
y.send(remote_client)

x = torch.tensor([3, 1, 5, -1, 0, 44])
x.send(remote_client)
Esempio n. 10
0
            # 7) change those weights
            opt.step()
            # 8) get model (with gradients)
            model_c = model_c.get()
            # 9) print our progress
            print(data.location.id, loss.get())
    return model_c


if __name__ == '__main__':
    # create a client workers mapping to the server workers in remote machines
    # remote_client_1 = PublicGridNetwork(hook=hook, gateway_url='http://localhost:8182')
    # remote_client_2 = PublicGridNetwork(hook=hook, gateway_url='http://localhost:8183')
    remote_client_1 = WebsocketClientWorker(
        host='localhost',
        # host = '192.168.0.102', # the host of remote machine, the same as the Server host
        hook=hook,
        id='server1',
        port=8182)
    remote_client_2 = WebsocketClientWorker(
        host='localhost',
        # host = '192.168.0.102', # the host of remote machine, the same as the Server host
        hook=hook,
        id='server2',
        port=8183)
    remote_clients_list = [remote_client_1, remote_client_2]
    print('>>> remote_client_1', remote_client_1)
    print('>>> remote_client_2', remote_client_2)

    # get the data pointers which point to the real data in remote machines for training model
    datasets = []
    for remote_client in remote_clients_list:
Esempio n. 11
0
def make_remote_client(host, hook, id, port):
    """Build a WebsocketClientWorker proxy for the server at host:port."""
    from syft.workers.websocket_client import WebsocketClientWorker
    client_kwargs = {"host": host, "hook": hook, "id": id, "port": port}
    return WebsocketClientWorker(**client_kwargs)
Esempio n. 12
0
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))


if __name__ == "__main__":
    hook = sy.TorchHook(torch)
    kwargs_websocket = {"host": "localhost", "hook": hook}
    alice = WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket)
    device = torch.device("cuda" if use_cuda else "cpu")

    # workers = [alice, bob, charlie]
    workers = [alice]
    grid = sy.PrivateGridNetwork(*workers)

    data = grid.search("#mnist", "#data")
    print(f"Search data: {len(data.keys())}")

    target = grid.search("#mnist", "#target")
    print(f"Search target: {len(target.keys())}")

    datasets_my = []
    for worker in data.keys():
        dataset = sy.BaseDataset(data[worker][0], target[worker][0])
def main():
    """Train an MNIST CNN with federated learning over three workers.

    Workers are either in-process VirtualWorkers (``args.use_virtual``) or
    WebsocketClientWorker proxies on localhost ports 8777-8779.
    """
    args = define_and_get_arguments()

    hook = sy.TorchHook(torch)

    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
        charlie = VirtualWorker(id="charlie", hook=hook, verbose=args.verbose)
    else:
        kwargs_websocket = {
            "host": "localhost",
            "hook": hook,
            "verbose": args.verbose
        }
        alice = WebsocketClientWorker(id="alice",
                                      port=8777,
                                      **kwargs_websocket)
        bob = WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
        charlie = WebsocketClientWorker(id="charlie",
                                        port=8779,
                                        **kwargs_websocket)

    workers = [alice, bob, charlie]

    use_cuda = args.cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    # Distribute the MNIST training set across the workers.
    federated_train_loader = sy.FederatedDataLoader(
        datasets.MNIST(
            "../data",
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ).federate(tuple(workers)),
        batch_size=args.batch_size,
        shuffle=True,
        iter_per_worker=True,
        **kwargs,
    )

    # Evaluation happens locally on the (undistributed) test set.
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs,
    )

    model = Net().to(device)

    for epoch in range(1, args.epochs + 1):
        logger.info("Starting epoch %s/%s", epoch, args.epochs)
        model = train(model, device, federated_train_loader, args.lr,
                      args.federate_after_n_batches)
        test(model, device, test_loader)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
def run_train_test(workers, ports, ids, X_train, X_test, y_train, y_test,
                   encoder):
    """Federated train/test of the threat-detection Net on two gateways.

    Args:
        workers: gateway hostnames/IPs (indices 0 and 1 are used).
        ports: websocket ports, parallel to ``workers``.
        ids: worker ids, parallel to ``workers``.
        X_train, X_test, y_train, y_test: numpy arrays with the dataset.
        encoder: label encoder mapping class indices back to threat names.
    """
    # Hook PyTorch ie add extra functionalities to support Federated Learning
    hook = sy.TorchHook(torch)
    # Sets the seed for generating random numbers.
    torch.manual_seed(1)
    # Select CPU computation, in case you want GPU use "cuda" instead
    device = torch.device("cpu")
    # Data will be distributed among these VirtualWorkers.
    # Remote training of the model will happen here.

    kwargs_websocket_bob = {"host": workers[1], "hook": hook}
    #gatway2 = sy.VirtualWorker(hook, id="gatway2")
    gatway2 = WebsocketClientWorker(id=ids[1],
                                    port=ports[1],
                                    **kwargs_websocket_bob)

    kwargs_websocket_alice = {"host": workers[0], "hook": hook}
    #gatway1 = sy.VirtualWorker(hook, id="gatway1")
    gatway1 = WebsocketClientWorker(id=ids[0],
                                    port=ports[0],
                                    **kwargs_websocket_alice)
    #kwargs_websocket_bob2 = {"host": workers[2], "hook": hook}
    #gatway2 = sy.VirtualWorker(hook, id="gatway2")
    #gatway3 = WebsocketClientWorker(id=ids[2], port=ports[2], **kwargs_websocket_bob2)

    #kwargs_websocket_alice2 = {"host": workers[3], "hook": hook}
    #gatway1 = sy.VirtualWorker(hook, id="gatway1")
    #gatway4= WebsocketClientWorker(id=ids[3], port=ports[3], **kwargs_websocket_alice2)
    # Number of times we want to iterate over whole training data
    #gatway3=sy.VirtualWorker(hook, id="gatway3")
    #gatway4=sy.VirtualWorker(hook, id="gatway4")
    BATCH_SIZE = 1000
    EPOCHS = 5
    LOG_INTERVAL = 5
    lr = 0.01

    n_feature = X_train.shape[1]
    n_class = np.unique(y_train).shape[0]

    print("Number of training features : ", n_feature)
    print("Number of training classes : ", n_class)
    # Create pytorch tensor from X_train,y_train,X_test,y_test
    train_inputs = torch.tensor(X_train,
                                dtype=torch.float).tag("#iot", "#network",
                                                       "#data", "#train")
    train_labels = torch.tensor(y_train).tag("#iot", "#network", "#target",
                                             "#train")
    test_inputs = torch.tensor(X_test,
                               dtype=torch.float).tag("#iot", "#network",
                                                      "#data", "#test")
    test_labels = torch.tensor(y_test).tag("#iot", "#network", "#target",
                                           "#test")

    # Send the training and test data to the gatways in equal proportion.
    train_idx = int(len(train_labels) / 2)
    test_idx = int(len(test_labels) / 2)
    gatway1_train_dataset = sy.BaseDataset(
        train_inputs[:train_idx], train_labels[:train_idx]).send(gatway1)
    gatway2_train_dataset = sy.BaseDataset(
        train_inputs[train_idx:2 * train_idx],
        train_labels[train_idx:2 * train_idx]).send(gatway2)
    #gatway3_train_dataset = sy.BaseDataset(train_inputs[2*train_idx:3*train_idx], train_labels[2*train_idx:3*train_idx]).send(gatway3)
    #gatway4_train_dataset = sy.BaseDataset(train_inputs[3*train_idx:], train_labels[3*train_idx:]).send(gatway4)
    gatway1_test_dataset = sy.BaseDataset(test_inputs[:test_idx],
                                          test_labels[:test_idx]).send(gatway1)
    gatway2_test_dataset = sy.BaseDataset(test_inputs[test_idx:2 * test_idx],
                                          test_labels[test_idx:2 *
                                                      test_idx]).send(gatway2)
    #gatway3_test_dataset = sy.BaseDataset(test_inputs[2*test_idx:2*test_idx], test_labels[2*test_idx:3*test_idx]).send(gatway3)
    #gatway4_test_dataset = sy.BaseDataset(test_inputs[3*test_idx:], test_labels[3*test_idx:]).send(gatway3)

    # Create federated datasets, an extension of Pytorch TensorDataset class
    federated_train_dataset = sy.FederatedDataset([
        gatway1_train_dataset, gatway2_train_dataset
    ])  #,gatway3_train_dataset,gatway4_train_dataset])
    federated_test_dataset = sy.FederatedDataset(
        [gatway1_test_dataset,
         gatway2_test_dataset])  #,gatway3_test_dataset,gatway4_test_dataset])

    # Create federated dataloaders, an extension of Pytorch DataLoader class
    federated_train_loader = sy.FederatedDataLoader(federated_train_dataset,
                                                    shuffle=True,
                                                    batch_size=BATCH_SIZE)
    federated_test_loader = sy.FederatedDataLoader(federated_test_dataset,
                                                   shuffle=False,
                                                   batch_size=BATCH_SIZE)
    # Initialize the model
    model = Net(n_feature, n_class)

    #Initialize the SGD optimizer
    optimizer = optim.SGD(model.parameters(), lr=lr)

    for epoch in range(1, EPOCHS + 1):
        # Train on the training data in a federated way
        start1 = datetime.datetime.now()
        model = train(model, device, federated_train_loader, optimizer, epoch,
                      BATCH_SIZE, LOG_INTERVAL)
        end1 = datetime.datetime.now()
        # Fixed: the "%d" placeholder was previously printed verbatim
        # because print() does no %-formatting; interpolate explicitly.
        print("Time Taken to train epoch %d is " % epoch, end1 - start1)
        if epoch == 1:
            total = end1 - start1
        else:
            total += end1 - start1
        # Check the test accuracy on unseen test data in a federated way
        start2 = datetime.datetime.now()
        test(model, device, federated_test_loader)
        end2 = datetime.datetime.now()
        # Same fix as above for the test timing message.
        print("Time Taken to test epoch %d is " % epoch, end2 - start2)
    print("Total training time = ", total)

    # Save the model
    torch.save(model.state_dict(), "binaize-threat-model_10.pt")
    # Reload the model in a new model object
    model_new = Net(n_feature, n_class)
    model_new.load_state_dict(torch.load("binaize-threat-model_10.pt"))
    model_new.eval()
    # NOTE(review): the file copied below
    # ("binaize-threat-model_fully_trained.pt") differs from the file saved
    # above ("binaize-threat-model_10.pt") -- confirm which artifact should
    # be shipped to the predict workers.
    process = os.popen(
        "sudo scp -i /root/.ssh/id_rsa -o stricthostkeychecking=no binaize-threat-model_fully_trained.pt root@%s:/home/ayush/ADS/predict_workers"
        % (workers[0]))
    output = process.read()
    process = os.popen(
        "sudo scp -i /root/.ssh/id_rsa -o stricthostkeychecking=no binaize-threat-model_fully_trained.pt root@%s:/home/ayush/ADS/predict_workers"
        % (workers[1]))
    output = process.read()
    # Take the 122th record from the test data
    idx = 122
    data = test_inputs[idx]
    pred = model_new(data)
    pred_label = int(pred.argmax().data.cpu().numpy())
    pred_threat = encoder.inverse_transform([pred_label])[0]
    print("Predicted threat type : ", pred_threat)
    actual_label = int(test_labels[idx].data.cpu().numpy())
    actual_threat = encoder.inverse_transform([actual_label])[0]
    print("Actual threat type : ", actual_threat)

    # Take the 159th record from the test data
    idx = 159
    data = test_inputs[idx]
    pred = model_new(data)
    pred_label = int(pred.argmax().data.cpu().numpy())
    pred_threat = encoder.inverse_transform([pred_label])[0]
    print("Predicted threat type : ", pred_threat)
    actual_label = int(test_labels[idx].data.cpu().numpy())
    actual_threat = encoder.inverse_transform([actual_label])[0]
    print("Actual threat type : ", actual_threat)
Esempio n. 15
0
def main():
    """Federated MNIST training on virtual or remote websocket workers."""
    args = define_and_get_arguments()
    hook = sy.TorchHook(torch)

    # Branch here when using virtual (simulated) workers
    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
        charlie = VirtualWorker(id="charlie", hook=hook, verbose=args.verbose)
        list_of_object = [alice, bob, charlie]
    # Branch here when using websocket workers
    else:
        base_port = 10002
        list_of_id = ["alice", "bob", "charlie"]
        list_of_ip = ["192.168.0.52", "192.168.0.53", "192.168.0.54"]
        list_of_object = []
        # One client proxy per remote host; all servers share the same port.
        for index in range(len(list_of_id)):
            kwargs_websockest = {"id": list_of_id[index], "hook": hook}
            list_of_object.append(
                WebsocketClientWorker(host=list_of_ip[index],
                                      port=base_port,
                                      **kwargs_websockest))

    workers = list_of_object

    use_cuda = args.cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    # Distribute the MNIST training set across the workers.
    federated_train_loader = sy.FederatedDataLoader(
        datasets.MNIST(
            "../data",
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ).federate(tuple(workers)),
        batch_size=args.batch_size,
        shuffle=True,
        iter_per_worker=True,
        **kwargs,
    )

    # Evaluation happens locally on the (undistributed) test set.
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs,
    )

    model = Net().to(device)

    for epoch in range(1, args.epochs + 1):
        logger.info("Starting epoch %s/%s", epoch, args.epochs)
        model = train(model, device, federated_train_loader, args.lr,
                      args.federate_after_n_batches)
        test(model, device, test_loader)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
def main():
    """Federated training of a 5-class classifier on resampled sensor data.

    Workers are virtual or remote websocket workers; data comes from
    ``process_data()`` and is split 90/10 into train and test sets.
    """
    args = define_and_get_arguments()

    hook = sy.TorchHook(torch)

    # Branch here when using virtual (simulated) workers
    if args.use_virtual:
        alice = VirtualWorker(id="alice", hook=hook, verbose=args.verbose)
        bob = VirtualWorker(id="bob", hook=hook, verbose=args.verbose)
        charlie = VirtualWorker(id="charlie", hook=hook, verbose=args.verbose)
    # Branch here when using websocket workers
    else:
        a_kwargs_websocket = {"host": "192.168.0.57", "hook": hook}
        b_kwargs_websocket = {"host": "192.168.0.58", "hook": hook}
        c_kwargs_websocket = {"host": "192.168.0.59", "hook": hook}

        baseport = 10002
        alice = WebsocketClientWorker(id="alice",
                                      port=baseport,
                                      **a_kwargs_websocket)
        bob = WebsocketClientWorker(id="bob",
                                    port=baseport,
                                    **b_kwargs_websocket)
        charlie = WebsocketClientWorker(id="charlie",
                                        port=baseport,
                                        **c_kwargs_websocket)

    # Collect the worker objects into a list
    workers = [alice, bob, charlie]

    # Whether to use CUDA
    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    # Set the random seed
    torch.manual_seed(args.seed)

    labels_resampled_factorized, obs_resampled_with_noise_2 = process_data()

    # percentage of test/valid set to use for testing and validation from the test_valid_idx (to be called test_size)
    test_size = 0.1

    # obtain training indices that will be used for validation
    num_train = len(obs_resampled_with_noise_2)
    indices = list(range(num_train))
    np.random.shuffle(indices)
    split = int(np.floor(test_size * num_train))
    train_idx, test_idx = indices[split:], indices[:split]

    federated_train_dataset = D.TensorDataset(
        torch.tensor(obs_resampled_with_noise_2[train_idx]),
        torch.tensor(labels_resampled_factorized[train_idx]))

    federated_train_loader = sy.FederatedDataLoader(
        federated_train_dataset.federate(tuple(workers)),
        batch_size=args.batch_size,
        shuffle=True,
        iter_per_worker=True,
        **kwargs,
    )

    test_dataset = D.TensorDataset(
        torch.tensor(obs_resampled_with_noise_2[test_idx]),
        torch.tensor(labels_resampled_factorized[test_idx]))

    test_loader = D.DataLoader(test_dataset,
                               shuffle=True,
                               batch_size=args.batch_size,
                               num_workers=0,
                               drop_last=True)

    model = Net(input_features=1, output_dim=5).to(device)
    criterion = nn.NLLLoss()

    for epoch in range(1, args.epochs + 1):
        logger.info("Starting epoch %s/%s", epoch, args.epochs)
        model = train(model,
                      device,
                      federated_train_loader,
                      args.lr,
                      args.federate_after_n_batches,
                      criterion=criterion)
        test(model,
             test_loader,
             args.batch_size,
             criterion=criterion,
             train_on_gpu=use_cuda)

    if args.save_model:
        torch.save(model.state_dict(), "./Model/mnist_cnn.pt")
Esempio n. 17
0
import torch
import syft as sy
from syft.workers.websocket_client import WebsocketClientWorker
from syft.exceptions import ObjectNotFoundError

if __name__ == "__main__":

    hook = sy.TorchHook(torch)
    # Client proxies for two websocket server workers on localhost.
    bad = WebsocketClientWorker(id="bad",
                                port=8777,
                                host="localhost",
                                hook=hook)

    good = WebsocketClientWorker(id="good",
                                 port=8778,
                                 host="localhost",
                                 hook=hook)

    # When starting the server, a tensor object tagged "test" was already
    # registered with the "good" server worker.

    # Searching on the "bad" worker is expected to fail.
    try:
        print("Search test with bad")
        data_pt = bad.search("test")
    except ObjectNotFoundError:
        print("Error: test is not found with bad worker!!!")

    print("Search test with good")
    data_pt = good.search("test")
    print("Test is found with good worker!!!")
    print("Here is the test data")
Esempio n. 18
0

# def inverse_transform(scalar, df, columns):
#     for col in columns:
#         df[col] = scaler.inverse_transform(df[col])
#     return df

bob_cl = '7'
alice_cl = '0'
target_cl = '15'
cluster = bob_cl
hook = sy.TorchHook(torch)
# bob = sy.VirtualWorker(hook, id="bob")
# alice = sy.VirtualWorker(hook, id="alice")
kwargs_websocket = {"host": "localhost", "hook": hook}
alice = WebsocketClientWorker(id='alice', port=8779, **kwargs_websocket)
bob = WebsocketClientWorker(id='bob', port=8778, **kwargs_websocket)
compute_nodes = [bob, alice]
bob_train, bob_test, bob_val = load_data(bob_cl)
alice_train, alice_test, alice_val = load_data(alice_cl)
target_train, target_test, target_val = load_data(target_cl)
remote_dataset = (list(), list())
train_distributed_dataset = []

for batch_idx, (data, target) in enumerate(bob_train):
    data = data.send(compute_nodes[0])
    target = target.send(compute_nodes[0])
    remote_dataset[0].append((data, target))

for batch_idx, (data, target) in enumerate(alice_train):
    data = data.send(compute_nodes[1])
Esempio n. 19
0
async def test_train_config_with_jit_trace_async(
        hook, start_proc):  # pragma: no cover
    """Remote-train a jit-traced model via TrainConfig and async_fit.

    Connects to an already-running websocket server worker (id "async_fit",
    port 8777), sends a TrainConfig holding a traced model and scripted loss,
    runs five remote fit rounds, then asserts that every parameter changed
    and that the loss decreased.

    NOTE(review): `start_proc` is unused here -- the server is assumed to be
    started externally (see the commented-out start_remote_worker call and
    the TODO below).
    """
    kwargs = {
        "id": "async_fit",
        "host": "localhost",
        "port": 8777,
        "hook": hook
    }
    # data = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
    # target = torch.tensor([[1.0], [1.0], [0.0], [0.0]], requires_grad=False)
    # dataset_key = "xor"
    data, target = utils.create_gaussian_mixture_toy_data(100)
    dataset_key = "gaussian_mixture"

    # Dummy (1, 2) example input used only to trace the model's graph.
    mock_data = torch.zeros(1, 2)

    # TODO check reason for error (RuntimeError: This event loop is already running) when starting websocket server from pytest-asyncio environment
    # dataset = sy.BaseDataset(data, target)

    # server, remote_proxy = start_remote_worker(id="async_fit", port=8777, hook=hook, dataset=(dataset, dataset_key))

    # time.sleep(0.1)

    remote_proxy = WebsocketClientWorker(**kwargs)

    # MSE loss as a TorchScript function so it can be serialized to the worker.
    @hook.torch.jit.script
    def loss_fn(pred, target):
        return ((target.view(pred.shape).float() - pred.float())**2).mean()

    class Net(torch.nn.Module):
        """Small 2-3-2-1 MLP used as the toy model for remote training."""
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = nn.Linear(2, 3)
            self.fc2 = nn.Linear(3, 2)
            self.fc3 = nn.Linear(2, 1)

        def forward(self, x):
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x

    model_untraced = Net()

    # Trace the model so it is serializable for transfer to the remote worker.
    model = torch.jit.trace(model_untraced, mock_data)

    # Baseline loss before any remote training.
    pred = model(data)
    loss_before = loss_fn(target=target, pred=pred)

    # Create and send train config
    train_config = sy.TrainConfig(model=model,
                                  loss_fn=loss_fn,
                                  batch_size=2,
                                  optimizer="SGD",
                                  optimizer_args={"lr": 0.1})
    train_config.send(remote_proxy)

    # Five remote training rounds on the worker-side dataset.
    for epoch in range(5):
        loss = await remote_proxy.async_fit(dataset_key=dataset_key)
        if PRINT_IN_UNITTESTS:  # pragma: no cover
            print("-" * 50)
            print("Iteration %s: alice's loss: %s" % (epoch, loss))

    # Pull the trained model back from the remote worker.
    new_model = train_config.model_ptr.get()

    # Every weight and bias must differ from the pre-training model.
    assert not (model.fc1._parameters["weight"]
                == new_model.obj.fc1._parameters["weight"]).all()
    assert not (model.fc2._parameters["weight"]
                == new_model.obj.fc2._parameters["weight"]).all()
    assert not (model.fc3._parameters["weight"]
                == new_model.obj.fc3._parameters["weight"]).all()
    assert not (model.fc1._parameters["bias"]
                == new_model.obj.fc1._parameters["bias"]).all()
    assert not (model.fc2._parameters["bias"]
                == new_model.obj.fc2._parameters["bias"]).all()
    assert not (model.fc3._parameters["bias"]
                == new_model.obj.fc3._parameters["bias"]).all()

    # Evaluate the returned model locally on the same data.
    new_model.obj.eval()
    pred = new_model.obj(data)
    loss_after = loss_fn(target=target, pred=pred)
    if PRINT_IN_UNITTESTS:  # pragma: no cover
        print("Loss before training: {}".format(loss_before))
        print("Loss after training: {}".format(loss_after))

    remote_proxy.close()
    # server.terminate()

    # Training must have reduced the loss.
    assert loss_after < loss_before
Esempio n. 20
0
hook = sy.TorchHook(torch)

# SearchConfig is a project-local config object; provides path/name and
# as_markdown()/print_params() used below -- defined elsewhere in the project.
config = SearchConfig()

default_device = torch.device("cuda:0")

# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)

# File logger under the config path, named "<config.name>.log".
logger = utils.get_logger(
    os.path.join(config.path, "{}.log".format(config.name)))
config.print_params(logger.info)

# Connect to two websocket server workers (ids "0"/"1", ports 8777/8778).
kwargs_websocket = {"hook": hook, "host": "0.0.0.0"}
alice = WebsocketClientWorker(id="0", port=8777, **kwargs_websocket)
bob = WebsocketClientWorker(id="1", port=8778, **kwargs_websocket)
workers = [alice, bob]

# Start each remote worker from a clean object store.
for wcw in workers:
    wcw.clear_objects_remote()
    # hook.local_worker.add_worker(wcw)

# One (data, target) pointer list per worker, filled later.
remote_train_data = ([], [])
remote_valid_data = ([], [])


async def main():
    logger.info("Logger is set - training start")

    # set default gpu device id
Esempio n. 21
0
from torchvision import transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import syft as sy
from syft.workers.websocket_server import WebsocketServerWorker
from syft.workers.websocket_client import WebsocketClientWorker

# Worker setup: connect to three websocket server workers running locally.
hook = sy.TorchHook(torch)

#bob = sy.VirtualWorker(hook, id="bob")
#alice = sy.VirtualWorker(hook, id="alice")

# The three clients differ only in id/port (ids "0".."2" on consecutive
# ports 8182..8184), so build them in one comprehension instead of three
# copy-pasted constructor calls. Each connects with message logging and
# verbose output enabled.
rc1, rc2, rc3 = [
    WebsocketClientWorker(host='localhost',
                          hook=hook,
                          id=str(idx),
                          port=8182 + idx,
                          log_msgs=True,
                          verbose=True)
    for idx in range(3)
]
rc4 = WebsocketClientWorker(host='localhost',
Esempio n. 22
0
import torch
from torch import optim
import syft
from syft.workers.websocket_client import WebsocketClientWorker
hook = syft.TorchHook(torch)
# Create a client worker mapping to the server worker on the remote machine.
remote_client = WebsocketClientWorker(
    host=
    '192.168.0.102',  # the host of the remote machine, the same as the server host
    hook=hook,
    id='liuwang',
    port=8182)
print('>>> remote_client', remote_client)

# Get data pointers to the real data on the remote machine, for training the model remotely.
# search() matches objects tagged with ALL of the given tags on the server.
features = remote_client.search(["toy", "features"])
labels = remote_client.search(["toy", "labels"])
print('>>> x:', features)
print('>>> y:', labels)

# A toy model: single linear layer, 2 inputs -> 1 output.
model = torch.nn.Linear(2, 1)
# Send a copy of the model to the remote worker; the local `model` is untouched.
remote_model = model.copy().send(remote_client)

def train(x, y, N) -> torch.nn.Module:
    # Training Logic
    opt = optim.SGD(params=remote_model.parameters(), lr=0.1)
    for iter in range(N):
        # 1) erase previous gradients (if they exist)
        opt.zero_grad()