def init_server(args, device, comm, rank, size, model, train_data_num, train_data_global, test_data_global):
    # aggregator
    client_num = size - 1
    aggregator = FedAVGAggregator(train_data_global, test_data_global, train_data_num,
                                  client_num, device, model, args)

    # start the distributed training
    server_manager = FedAVGServerManager(args, comm, rank, size, aggregator)
    server_manager.run()
def init_server(args, device, comm, rank, size, model, train_data_num, train_data_global, test_data_global,
                train_data_local_dict, test_data_local_dict, train_data_local_num_dict):
    # aggregator
    worker_num = size - 1
    aggregator = FedAVGAggregator(train_data_global, test_data_global, train_data_num,
                                  train_data_local_dict, test_data_local_dict, train_data_local_num_dict,
                                  worker_num, device, model, args)

    # start the distributed training
    server_manager = FedAVGServerManager(args, aggregator, comm, rank, size)
    server_manager.send_init_msg()
    server_manager.run()
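# --- Illustrative sketch (not part of the FedML excerpts) ---------------------------------
# A minimal example of how init_server might be wired up from an MPI entry point, assuming
# rank 0 plays the server role and every other rank runs a matching client-side routine.
# The names main_fedavg, data_fn, and model_fn are hypothetical stand-ins; only init_server
# and its argument layout come from the excerpt above, and the real FedML launcher may differ.
import torch
from mpi4py import MPI


def main_fedavg(args, data_fn, model_fn):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()   # process id: 0 = server, 1..size-1 = clients
    size = comm.Get_size()   # total number of MPI processes (server + clients)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # data_fn / model_fn are assumed helpers mirroring load_data / create_model used below
    (train_data_num, test_data_num, train_data_global, test_data_global,
     train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
     class_num) = data_fn(args)
    model = model_fn(args, output_dim=class_num)

    if rank == 0:
        init_server(args, device, comm, rank, size, model, train_data_num,
                    train_data_global, test_data_global,
                    train_data_local_dict, test_data_local_dict, train_data_local_num_dict)
    else:
        # clients would run the matching client-side init routine here (not shown in the excerpt)
        pass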
    config=args
)

# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
np.random.seed(0)
torch.manual_seed(10)

# GPU 0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# load data
dataset = load_data(args, args.dataset)
[train_data_num, test_data_num, train_data_global, test_data_global,
 train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
 class_num] = dataset

# create model.
# Note if the model is DNN (e.g., ResNet), the training will be very slow.
# In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)
model = create_model(args, model_name=args.model, output_dim=dataset[7])

aggregator = FedAVGAggregator(train_data_global, test_data_global, train_data_num,
                              train_data_local_dict, test_data_local_dict, train_data_local_num_dict,
                              args.client_num_per_round, device, model, args)

size = args.client_num_per_round + 1
server_manager = FedAVGServerManager(args, aggregator, rank=0, size=size, backend="MQTT")
server_manager.run()

app.run(host='127.0.0.1', port=5000, debug=False)
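# --- Illustrative sketch (not part of the FedML excerpts) ---------------------------------
# A minimal sketch of the argument object the MQTT server snippet above assumes. Only the
# fields actually read in the excerpt (dataset, model, client_num_per_round) are shown; real
# FedML runs pass many more options, and the defaults here are arbitrary placeholders rather
# than the project's own defaults.
import argparse


def add_args(parser):
    parser.add_argument('--dataset', type=str, default='mnist',
                        help='dataset passed to load_data (placeholder default)')
    parser.add_argument('--model', type=str, default='lr',
                        help='model name passed to create_model (placeholder default)')
    parser.add_argument('--client_num_per_round', type=int, default=4,
                        help='clients sampled per round; server process count is this value + 1')
    return parser


args = add_args(argparse.ArgumentParser(description='FedAvg MQTT server (sketch)')).parse_args()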