def init_trainer(): client_comm = Streamer(ModelReceiver()) # --> client_id, model_states, sample_size, round_id client_comm.add_pipe( pipes.ToModel(lambda: LogisticRegression(28 * 28, 10), model_pos=1)) # --> client_id, lr_models, sample_size, round_id client_comm.add_pipe( pipes.TrainModel(epochs=10, optimizer_method='sgd', data_streamer=mnist_streamer, criterion=nn.CrossEntropyLoss())) # --> client_id, trained_lr_model_states,sample_size, round_id client_comm.add_pipe( pipes.ToModel(lambda: LogisticRegression(28 * 28, 10), model_pos=1)) # --> client_id, trained_lr_model,sample_size, round_id client_comm.add_pipe( pipes.Infer(model_pos=1, criterion=nn.CrossEntropyLoss(), test_data=mnist_streamer.next(id=100)[0])) # --> client_id, trained_lr_model,sample_size, round_id client_comm.add_pipe(pipes.ToModelDictState(model_pos=1)) # --> client_id, trained_lr_model_states,sample_size, round_id client_comm.add_pipe(pipes.ToServer()) # --> client_id, trained_lr_model_states,sample_size, round_id return client_comm
def create_model(args, model_name, output_dim):
    """Instantiate the model matching (model_name, args.dataset).

    Returns the constructed model, or None when no known combination
    matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    if model_name == "lr" and args.dataset == "mnist":
        logging.info("LogisticRegression + MNIST")
        return LogisticRegression(28 * 28, output_dim)
    if model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        return CNN_DropOut(False)
    if model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        return resnet18()
    if model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        return RNN_OriginalFedAvg()
    if model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        return RNN_OriginalFedAvg()
    if model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        return LogisticRegression(10000, output_dim)
    if model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        logging.info("RNN + stackoverflow_nwp")
        return RNN_StackOverFlow()
    if model_name == "resnet56":
        return resnet56(class_num=output_dim)
    if model_name == "mobilenet":
        return mobilenet(class_num=output_dim)
    # No branch matched -- caller receives None.
    return None
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Args:
        args: parsed arguments; only ``args.dataset`` is read here.
        model_name: architecture identifier ("lr", "cnn", "vgg11", ...).
        output_dim: number of output classes for models that accept it.

    Returns:
        The instantiated model, or None when no branch matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset in ["mnist", "fmnist", "emnist"]:
        logging.info("LogisticRegression + MNIST")
        # flatten=True: 2-D images are flattened to 784 features inside the model.
        model = LogisticRegression(28 * 28, output_dim, flatten=True)
    elif model_name == "cnn" and args.dataset in ["mnist", "fmnist", "emnist"]:
        if args.dataset in ["mnist", "fmnist"]:
            logging.info("CNN + MNIST")
            model = CNN_DropOut(True)
        elif args.dataset == "emnist":
            # Fixed log label: this branch builds the EMNIST variant, not MNIST.
            logging.info("CNN + EMNIST")
            # NOTE(review): only_digits is given 47 (EMNIST-balanced class
            # count) while sibling branches pass a bool -- confirm
            # CNN_DropOut accepts a class count here.
            model = CNN_DropOut(only_digits=47)
    elif model_name == "cnn" and args.dataset in ["har", "har_subject"]:
        logging.info("CNN + HAR")
        model = HAR_CNN(data_size=(9, 128), n_classes=6)
    elif model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "cnn" and args.dataset == "cifar10":
        logging.info("CNN + CIFAR10")
        model = CNNCifar()
    elif model_name == "purchasemlp":
        if args.dataset == "purchase100":
            model = PurchaseMLP(input_dim=600, n_classes=100)
    elif model_name == "texasmlp":
        if args.dataset == "texas100":
            model = TexasMLP(input_dim=6169, n_classes=100)
    elif model_name == 'lr' and args.dataset == "adult":
        model = LogisticRegression(105, 2, flatten=False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        # NOTE(review): other variants of this factory use 10004 input
        # features for stackoverflow_lr -- confirm 10000 is intended here.
        model = LogisticRegression(10000, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    elif model_name == "resnet56":
        model = resnet56(class_num=output_dim)
    elif model_name == "vgg11":
        model = VGG("VGG11")
    elif model_name == "resnet20":
        if args.dataset == "cifar10":
            model = resnet20_cifar(num_classes=10)
        elif args.dataset == "chmnist":
            model = resnet20_cifar(num_classes=8)
    elif model_name == "mobilenet":
        model = mobilenet(class_num=output_dim)
    return model
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Args:
        args: parsed arguments; only ``args.dataset`` is read here.
        model_name: architecture identifier.
        output_dim: number of output classes for models that accept it.

    Returns:
        The instantiated model.

    Raises:
        NotImplementedError: when no branch matches model_name/dataset.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset == "mnist":
        logging.info("LogisticRegression + MNIST")
        model = LogisticRegression(28 * 28, output_dim)
    elif model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        model = LogisticRegression(10004, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        # Fixed log label: this branch builds an RNN, not a CNN.
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    elif model_name == "resnet56":
        model = resnet56(class_num=output_dim)
    elif model_name == "mobilenet":
        model = mobilenet(class_num=output_dim)
    # TODO
    elif model_name == 'mobilenet_v3':
        # model_mode in {LARGE: 5.15M params, SMALL: 2.94M params}
        model = MobileNetV3(model_mode='LARGE', num_classes=output_dim)
    elif model_name == 'efficientnet':
        # model = EfficientNet()
        # Reference table (unused at runtime): per-variant coefficients
        # (width, depth, resolution, dropout). Only b0 is instantiated below.
        efficientnet_dict = {
            'efficientnet-b0': (1.0, 1.0, 224, 0.2),
            'efficientnet-b1': (1.0, 1.1, 240, 0.2),
            'efficientnet-b2': (1.1, 1.2, 260, 0.3),
            'efficientnet-b3': (1.2, 1.4, 300, 0.3),
            'efficientnet-b4': (1.4, 1.8, 380, 0.4),
            'efficientnet-b5': (1.6, 2.2, 456, 0.4),
            'efficientnet-b6': (1.8, 2.6, 528, 0.5),
            'efficientnet-b7': (2.0, 3.1, 600, 0.5),
            'efficientnet-b8': (2.2, 3.6, 672, 0.5),
            'efficientnet-l2': (4.3, 5.3, 800, 0.5),
        }
        # default is 'efficientnet-b0'
        model = EfficientNet.from_name(model_name='efficientnet-b0', num_classes=output_dim)
        # model = EfficientNet.from_pretrained(model_name='efficientnet-b0')
    else:
        raise NotImplementedError
    return model
def main():
    """Entry point: configure W&B and logging, seed RNGs, load the streaming
    dataset, and launch decentralized federated training."""
    ROOT_PATH = args.root_path
    # Bug fix: the original assignment ended with a trailing comma, which
    # made RUN_NAME a 1-tuple, so the W&B run name rendered as "('...',)".
    RUN_NAME = str(args.mode) + \
        "-id" + str(args.run_id) + \
        "-group_id" + str(args.group_id) + \
        "-n" + str(args.client_number) + \
        "-symm" + str(args.b_symmetric) + \
        "-tu" + str(args.topology_neighbors_num_undirected) + \
        "-td" + str(args.topology_neighbors_num_directed) + \
        "-lr" + str(args.learning_rate)
    wandb.init(project="fedml", name=str(RUN_NAME), config=args)
    logging.basicConfig(
        level=logging.INFO,
        format=' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S')
    logging.info('Decentralized Online Learning.')
    # fix random seeds for reproducibility
    seed = 1234
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # create id list: one id per client
    client_number = args.client_number
    client_id_list = [i for i in range(client_number)]
    print(client_id_list)
    # load data: total sample count is one sample per client per iteration
    iteration_number_T = args.iteration_number
    sample_num_in_total = client_number * iteration_number_T
    beta = args.beta
    data_name = args.data_name
    data_path = ""
    if data_name == "SUSY":
        data_path = ROOT_PATH + "SUSY/SUSY.csv"
        input_dim = 18
    elif data_name == "RO":
        data_path = ROOT_PATH + "room_occupancy/datatraining.txt"
        input_dim = 5
    else:
        # NOTE(review): an unknown data_name leaves data_path empty --
        # confirm DataLoader handles that, or raise here instead.
        input_dim = 5
    data_loader = DataLoader(data_name, data_path, client_id_list, sample_num_in_total, beta)
    streaming_data = data_loader.load_datastream()
    # create model; model_cache is a second copy used by the trainer
    model = LogisticRegression(input_dim, args.output_dim)
    model_cache = LogisticRegression(input_dim, args.output_dim)
    # start training
    FedML_decentralized_fl(client_number, client_id_list, streaming_data, model, model_cache, args)
def main():
    """Smoke-test for the partitioned MNIST data loader: trains a logistic
    regression on the global train split and prints test accuracy."""
    # test the data loader
    # Hyper Parameters
    input_size = 784
    num_classes = 10
    num_epochs = 50
    batch_size = 10
    learning_rate = 0.03
    # Fixed seeds for reproducibility.
    np.random.seed(0)
    torch.manual_seed(10)
    # NOTE(review): hard-coded CUDA device -- crashes on CPU-only machines;
    # confirm a fallback is not needed here.
    device = torch.device("cuda:0")
    client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
        train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
        class_num = load_partition_data_mnist(batch_size)
    model = LogisticRegression(input_size, num_classes).to(device)
    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # Training the Model
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_data_global):
            images = images.to(device)
            labels = labels.to(device)
            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # if (i + 1) % 100 == 0:
            #     print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
            #           % (epoch + 1, num_epochs, i + 1, len(train_data_global), loss.item()))
    # Test the Model
    correct = 0
    total = 0
    for x, labels in test_data_global:
        x = x.to(device)
        labels = labels.to(device)
        outputs = model(x)
        # Argmax over the last dimension is the predicted class.
        _, predicted = torch.max(outputs.data, -1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
    # 52% in the last round
    print('Accuracy of the model: %d %%' % (100 * correct // total))
def test_on_all_sub_models(self, round_idx: int):
    """Leave-one-out influence analysis for one round.

    Evaluates the full global model, then for each client id in
    ``self.model_dict`` re-aggregates all OTHER clients' models and
    measures how training/test metrics shift without that client.
    Results are stored under ``self.model_influence["round_<idx>"]``,
    keyed "." for the full model and str(idx) per left-out client.
    """
    print("####round: " + str(round_idx) + "####")
    round_key = "round_" + str(round_idx)
    train_stats, test_stats = self.test_model_on_all_clients(
        self.model, round_idx)
    print("round", round_idx)
    print("global_model_train", train_stats)
    print("global_model_test", test_stats)
    self.model_influence[round_key] = {}
    # "." entry holds the metrics of the full (all-clients) global model.
    self.model_influence[round_key]["."] = {
        "train_stats": train_stats,
        "test_stats": test_stats
    }
    print("model[.]:", train_stats)
    for idx in self.model_dict:
        # Shallow copy so deleting idx does not mutate self.model_dict.
        temp_model_dict = dict(self.model_dict)
        del temp_model_dict[idx]
        model_params = self.aggregate_models(temp_model_dict)
        sub_model = LogisticRegression(28 * 28, 10)
        # sub_model = self.cached_model
        sub_model.load_state_dict(model_params)
        train_stats, test_stats = self.test_model_on_all_clients(
            sub_model, round_idx)
        print("model[" + str(idx) + "]:", train_stats)
        print("model[" + str(idx) + "].train", train_stats)
        print("model[" + str(idx) + "].test", test_stats)
        influence = self._influence(sub_model)
        print("model[" + str(idx) + "].influence", influence)
        # NOTE(review): influence_original is unpacked but never used --
        # confirm whether it should also be recorded.
        influence_no_real, influence_both, influence_original = influence
        influence_ecl = self._influence_ecl(sub_model)
        self.model_influence[round_key][str(idx)] = {}
        # self.model_influence[round_key][str(idx)]["test_stats"] = train_stats
        self.model_influence[round_key][str(
            idx)]["train_stats"] = train_stats
        self.model_influence[round_key][str(
            idx)]["influence_no_real"] = influence_no_real
        self.model_influence[round_key][str(
            idx)]["influence_real"] = influence_both
        # .numpy() implies _influence_ecl returns a tensor on CPU.
        self.model_influence[round_key][str(
            idx)]["influence_ecl"] = influence_ecl.numpy()
        # print("influence[" + str(idx) + "]", influence)
        # print("euclidean_influence[" + str(idx) + "]", self._influence_ecl(sub_model))
        # plotter.append(influence)
    print("####end of round: " + str(round_idx) + "####")
    # plotter.save("round_" + str(round_idx))
    # self.log_cache.save()
    return ""
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Args:
        args: parsed arguments; only ``args.dataset`` is read here.
        model_name: architecture identifier.
        output_dim: number of output classes for models that accept it.

    Returns:
        The instantiated model, or None when no branch matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset == "mnist":
        logging.info("LogisticRegression + MNIST")
        model = LogisticRegression(28 * 28, output_dim)
    elif model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        model = LogisticRegression(10004, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        # Fixed log label: this branch builds an RNN, not a CNN.
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    elif model_name == "resnet20":
        model = resnet20(class_num=output_dim)
    elif model_name == "resnet38":
        # Best-effort construction: failures are logged and model stays
        # None rather than crashing the caller.
        try:
            # logging.info('Test model!!!!!!!!!!!!!!!!!!!!!!!!!!')
            model = resnet38(class_num=output_dim)
            logging.info(str(model))
        except Exception as error:
            logging.info(str(error))
    elif model_name == "resnet74":
        model = resnet74(class_num=output_dim)
    elif model_name == "resnet110":
        model = resnet110(class_num=output_dim)
    elif model_name == "resnet18_imagenet":
        model = resnet18_imagenet(num_classes=output_dim)
    elif model_name == "mobilenet":
        model = mobilenet(class_num=output_dim)
    elif model_name == 'mlp_fedcom':
        model = MLP_fedcom()
    elif model_name == 'mobilenetv2':
        model = MobileNetV2(num_classes=output_dim)
    return model
def build(self):
    """Train one local model per client in ``database_clients`` and cache
    the data provider, trained weights, model object, and sample count,
    printing each model's accuracy on the shared test set.
    """
    # Idiom fix: the original used enumerate() but never used the index.
    for client_idx in database_clients:
        data = SQLDataProvider(args()).cache(client_idx)
        model = LogisticRegression(28 * 28, 10)
        trained = train(model, data.batch(8))
        self.data_dict[client_idx] = data
        self.model_stats[client_idx] = trained
        self.models[client_idx] = model
        self.sample_dict[client_idx] = len(data)
        print("model accuracy:", infer(model, self.test_data.batch(8)))
def test_a_case(test_case, title='start evaluation'):
    """Aggregate the selected clients' trained weights into one global
    model and print its accuracy and loss on the shared test set."""
    print('-----------------' + title + '-----------------')
    selected_states = dict_select(test_case, model_stats)
    selected_sizes = dict_select(test_case, sample_dict)
    global_model_stats = aggregate(selected_states, selected_sizes)
    global_model = LogisticRegression(28 * 28, 10)
    load(global_model, global_model_stats)
    print("test case:", test_case)
    result = infer(global_model, test_data.batch(8))
    print("global model accuracy:", result[0])
    print("global model loss:", result[1])
    print("----------------------------------------------------")
def test_selection_accuracy(self, client_idx, title='test accuracy', output=True):
    """Aggregate the chosen clients' weights into one model and return its
    (accuracy, loss) on the shared test set; print details when ``output``."""
    print('-----------------' + title + '-----------------')
    stats_subset = tools.dict_select(client_idx, self.model_stats)
    sizes_subset = tools.dict_select(client_idx, self.sample_dict)
    merged_state = tools.aggregate(stats_subset, sizes_subset)
    merged_model = LogisticRegression(28 * 28, 10)
    tools.load(merged_model, merged_state)
    acc_loss = tools.infer(merged_model, self.test_data.batch(8))
    if output:
        print("test case:", client_idx)
        print("global model accuracy:", acc_loss[0], 'loss:', acc_loss[1])
    return acc_loss
def test_selection_accuracy(self, client_idx, title='test accuracy'):
    """Aggregate the chosen clients' weights into one model, print its
    accuracy/loss on the shared test set, and return the (accuracy, loss)
    result."""
    print('-----------------' + title + '-----------------')
    subset_states = dict_select(client_idx, self.model_stats)
    subset_sizes = dict_select(client_idx, self.sample_dict)
    combined_state = aggregate(subset_states, subset_sizes)
    combined_model = LogisticRegression(28 * 28, 10)
    load(combined_model, combined_state)
    print("test case:", client_idx)
    acc_loss = infer(combined_model, self.test_data.batch(8))
    print("global model accuracy:", acc_loss[0])
    print("global model loss:", acc_loss[1])
    return acc_loss
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Returns:
        The instantiated model.

    Raises:
        NotImplementedError: when no branch matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset == "synthetic_1_1":
        logging.info("LogisticRegression + synthetic_1_1")
        model = LogisticRegression(60, output_dim)
    elif model_name == "vgg" and args.dataset == "cifar10":
        logging.info("VGG11 + cifar10")
        model = vgg11()
    else:
        # Bug fix: the original did `raise("Model not added!")`, which
        # raises a plain str and fails with TypeError at runtime. Raise a
        # real exception (NotImplementedError matches sibling factories).
        raise NotImplementedError("Model not added!")
    return model
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Args:
        args: parsed arguments; only ``args.dataset`` is read here.
        model_name: architecture identifier.
        output_dim: number of output classes for models that accept it.

    Returns:
        The instantiated model, or None when no branch matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset == "mnist":
        logging.info("LogisticRegression + MNIST")
        model = LogisticRegression(28 * 28, output_dim)
    elif model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        model = LogisticRegression(10004, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        # Fixed log label: this branch builds an RNN, not a CNN.
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    elif model_name == "resnet56":
        model = resnet56(class_num=output_dim)
    elif model_name == "mobilenet":
        model = mobilenet(class_num=output_dim)
    elif model_name == "mobilenet_v2":
        model = models.mobilenet_v2()
    # TODO
    elif model_name == 'mobilenet_v3':
        # model_mode in {LARGE: 5.15M params, SMALL: 2.94M params}
        model = MobileNetV3(model_mode='LARGE')
    elif model_name == 'efficientnet':
        # NOTE(review): sibling factories build this via
        # EfficientNet.from_name('efficientnet-b0', ...) -- confirm the
        # bare EfficientNet() constructor is intended here.
        model = EfficientNet()
    return model
def build(self, test_models=False, round_idx=0):
    """Train one local model per client in ``self.all_clients`` and cache
    the data provider, trained weights, model object, and sample count.

    Args:
        test_models: when True, also print each model's accuracy on the
            shared test set.
        round_idx: unused in this method; kept for interface compatibility.
    """
    print("Building Models --Started")
    # Idiom fix: the original used enumerate() but never used the index.
    for client_idx in self.all_clients:
        data = SQLDataProvider(args()).cache(client_idx)
        model = LogisticRegression(28 * 28, 10)
        trained = tools.train(model, data.batch(8))
        self.data_dict[client_idx] = data
        self.model_stats[client_idx] = trained
        self.models[client_idx] = model
        self.sample_dict[client_idx] = len(data)
        if test_models:
            print("model accuracy:", tools.infer(model, self.test_data.batch(8)))
    print("Building Models --Finished")
def init_server(): server_comm = Streamer(ModelReceiver()) # --> client_id, model_states, sample_size, round_id server_comm.add_pipe( pipes.ToModel(lambda: LogisticRegression(28 * 28, 10), model_pos=1)) # --> client_id, model, sample_size, round_id server_comm.add_pipe(pipes.Collect(size=2)) # --> [(client_id, lr_model, sample_size)*size], round_id server_comm.add_pipe(pipes.AvgAggregator()) # --> global_model_state, training_size, round_id server_comm.add_pipe( pipes.ToModel(lambda: LogisticRegression(28 * 28, 10), model_pos=0)) # --> global_model, training_size, round_id server_comm.add_pipe( pipes.Infer(model_pos=0, criterion=nn.CrossEntropyLoss(), test_data=mnist_streamer.next(id=100)[0])) # --> global_model, training_size, round_id server_comm.add_pipe(pipes.ToModelDictState(model_pos=0)) # --> global_model_state, training_size, round_id server_comm.add_pipe(pipes.ToRandomClients([1, 2, 3], nb_client=2)) # --> global_model_state, training_size, round_id + 1 return server_comm
def create_model(args, model_name, output_dim):
    """Model factory; the lr/rnn branches also force SGD as the client
    optimizer by mutating ``args``. Returns None for unknown names."""
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    if model_name == "lr" and args.dataset == "mnist":
        model = LogisticRegression(28 * 28, output_dim)
        args.client_optimizer = "sgd"
        return model
    if model_name == "rnn" and args.dataset == "shakespeare":
        # NOTE(review): other create_model variants call
        # RNN_OriginalFedAvg() with no arguments -- confirm this signature.
        model = RNN_OriginalFedAvg(28 * 28, output_dim)
        args.client_optimizer = "sgd"
        return model
    if model_name == "resnet56":
        return resnet56(class_num=output_dim)
    if model_name == "mobilenet":
        return mobilenet(class_num=output_dim)
    return None
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Returns:
        The instantiated model.

    Raises:
        Exception: when no branch matches model_name/dataset.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        model = LogisticRegression(10004, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    else:
        # Bug fix: the original used "{}".../% mixing
        # ("No model named {} scripted!" % model_name), which raises
        # TypeError instead of the intended message.
        raise Exception("No model named {} scripted!".format(model_name))
    return model
def initiate_federated():
    """Send a fresh LogisticRegression model to every worker rank.

    NOTE(review): ranks are addressed as 1..size (rank 0 presumably being
    the coordinator) -- confirm the communicator's rank layout.
    """
    for rank in range(1, context.comm.size() + 1):
        context.comm.send(rank, LogisticRegression(28 * 28, 10))
mnist_streamer = Streamer(mnist_source) mnist_streamer.add_pipe(pipes.MnistDataParser()) mnist_streamer.add_pipe(pipes.ToTensor()) mnist_streamer.add_pipe(pipes.Batch(5)) def initiate_federated(): for i in range(context.comm.size()): context.comm.send(i + 1, LogisticRegression(28 * 28, 10)) if context.comm.pid() == 0: server_comm = Streamer(ModelReceiver()) # --> client_id, model_states, sample_size, round_id server_comm.add_pipe( pipes.ToModel(lambda: LogisticRegression(28 * 28, 10))) # --> client_id, model, sample_size, round_id server_comm.add_pipe(pipes.Collect(size=2)) # --> [(client_id, lr_model, sample_size)*size], round_id server_comm.add_pipe(pipes.AvgAggregator()) # --> global_model, training_size, round_id server_comm.add_pipe( pipes.Infer(model_pos=0, criterion=nn.CrossEntropyLoss())) # --> global_model, training_size, round_id server_comm.add_pipe(pipes.ToRandomClients([1, 2, 3], nb_client=2)) # --> global_model, training_size, round_id server_comm.run(init=initiate_federated, context=context) else: client_comm = Streamer(ModelReceiver()) # --> client_id, model_states, sample_size, round_id
return sim def heatmap(dct): matrix = [] for key, data in dct.items(): simi = similarities(data.y, dct) matrix.append(simi) print(matrix) test_data = SQLDataProvider(args()).cache(99999) print("test data:", test_data.y) for index, client_idx in enumerate(database_clients): data = SQLDataProvider(args()).cache(client_idx) model = LogisticRegression(28 * 28, 10) trained = train(model, data.batch(8)) data_dict[client_idx] = data model_stats[client_idx] = trained models[client_idx] = model sample_dict[client_idx] = len(data) print("model accuracy:", infer(model, test_data.batch(8))) def test_a_case(test_case, title='start evaluation'): print('-----------------' + title + '-----------------') global_model_stats = aggregate(dict_select(test_case, model_stats), dict_select(test_case, sample_dict)) global_model = LogisticRegression(28 * 28, 10) load(global_model, global_model_stats) print("test case:", test_case) acc_loss = infer(global_model, test_data.batch(8))
def create_model(output_dim):
    """Factory for a flattened-MNIST (784-feature) logistic-regression
    classifier with ``output_dim`` classes."""
    logging.info("create an lr model")
    model = LogisticRegression(28 * 28, output_dim)
    return model
def create_model(args, model_name, output_dim):
    """Build the model for (model_name, args.dataset).

    Args:
        args: parsed arguments; only ``args.dataset`` is read here.
        model_name: architecture identifier ("lr", "cnn", "vgg11", ...).
        output_dim: number of output classes for models that accept it.

    Returns:
        The instantiated model, or None when no branch matches.
    """
    logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
    model = None
    if model_name == "lr" and args.dataset in ["mnist", "fmnist", "emnist"]:
        logging.info("LogisticRegression + MNIST")
        # flatten=True: 2-D images are flattened to 784 features inside the model.
        model = LogisticRegression(28 * 28, output_dim, flatten=True)
    elif model_name == "cnn" and args.dataset in ["mnist", "fmnist", "emnist"]:
        if args.dataset in ["mnist", "fmnist"]:
            logging.info("CNN + MNIST")
            model = CNN_DropOut(True)
        elif args.dataset == "emnist":
            # Fixed log label: this branch builds the EMNIST variant, not MNIST.
            logging.info("CNN + EMNIST")
            # NOTE(review): only_digits is given 47 (EMNIST-balanced class
            # count) while sibling branches pass a bool -- confirm
            # CNN_DropOut accepts a class count here.
            model = CNN_DropOut(only_digits=47)
    elif model_name == "cnn" and args.dataset == "har":
        logging.info("CNN + HAR")
        # model = init_specific_model("Cnn1", data_size=(9, 128), num_classes=6)
        model = HAR_CNN(data_size=(9, 128), n_classes=6)
    elif model_name == "cnn" and args.dataset == "femnist":
        logging.info("CNN + FederatedEMNIST")
        model = CNN_DropOut(False)
    elif model_name == "purchasemlp":
        if args.dataset == "purchase100":
            model = PurchaseMLP(input_dim=600, n_classes=100)
    elif model_name == "texasmlp":
        if args.dataset == "texas100":
            model = TexasMLP(input_dim=6169, n_classes=100)
    elif model_name == 'lr' and args.dataset == "adult":
        model = LogisticRegression(105, 2, flatten=False)
    elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
        logging.info("ResNet18_GN + Federated_CIFAR100")
        model = resnet18()
    elif model_name == "rnn" and args.dataset == "shakespeare":
        logging.info("RNN + shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "rnn" and args.dataset == "fed_shakespeare":
        logging.info("RNN + fed_shakespeare")
        model = RNN_OriginalFedAvg()
    elif model_name == "lr" and args.dataset == "stackoverflow_lr":
        logging.info("lr + stackoverflow_lr")
        model = LogisticRegression(10004, output_dim)
    elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
        # Fixed log label: this branch builds an RNN, not a CNN.
        logging.info("RNN + stackoverflow_nwp")
        model = RNN_StackOverFlow()
    elif model_name == "resnet56":
        model = resnet56(class_num=output_dim)
    elif model_name == "vgg11":
        model = VGG("VGG11")
    elif model_name == "resnet20":
        if args.dataset == "cifar10":
            model = resnet20_cifar(num_classes=10)
        elif args.dataset == "chmnist":
            model = resnet20_cifar(num_classes=8)
    elif model_name == "mobilenet":
        model = mobilenet(class_num=output_dim)
    elif model_name == 'mobilenet_v3':
        # model_mode in {LARGE: 5.15M params, SMALL: 2.94M params}
        model = MobileNetV3(model_mode='LARGE', num_classes=output_dim)
    elif model_name == 'efficientnet':
        # model = EfficientNet()
        # Reference table (unused at runtime): per-variant coefficients
        # (width, depth, resolution, dropout). Only b0 is instantiated below.
        efficientnet_dict = {
            'efficientnet-b0': (1.0, 1.0, 224, 0.2),
            'efficientnet-b1': (1.0, 1.1, 240, 0.2),
            'efficientnet-b2': (1.1, 1.2, 260, 0.3),
            'efficientnet-b3': (1.2, 1.4, 300, 0.3),
            'efficientnet-b4': (1.4, 1.8, 380, 0.4),
            'efficientnet-b5': (1.6, 2.2, 456, 0.4),
            'efficientnet-b6': (1.8, 2.6, 528, 0.5),
            'efficientnet-b7': (2.0, 3.1, 600, 0.5),
            'efficientnet-b8': (2.2, 3.6, 672, 0.5),
            'efficientnet-l2': (4.3, 5.3, 800, 0.5),
        }
        # default is 'efficientnet-b0'
        model = EfficientNet.from_name(model_name='efficientnet-b0', num_classes=output_dim)
    return model
def initiate_federated():
    """Send an initial (rank_index, fresh_model_state, 0, 0) tuple to every
    peer rank (addressed as 1..size)."""
    for rank_index in range(context.comm.size()):
        # A new model is built per peer so each starts from its own copy.
        initial_states = LogisticRegression(28 * 28, 10).state_dict()
        context.comm.send(rank_index + 1, (rank_index, initial_states, 0, 0))