# Single training run: Yogi server optimizer + plain SGD on the clients,
# with the client optimizer state re-initialized each round ("reinit").
# Relies on module-level names defined earlier in the file:
# server_lr, is_iid, B, C, NC, E, max_rounds, project_name — TODO confirm upstream.
client_lr = 0.0316
server_opt = "Yogi"
client_opt = "SGD"
client_opt_strategy = "reinit"
# image_norm = "tflike"
# TODO: no error was raised for incorrectly named parameters
config = TorchFederatedLearnerCIFAR100Config(
    BREAK_ROUND=1500,
    CLIENT_LEARNING_RATE=client_lr,
    CLIENT_OPT=client_opt,
    # CLIENT_OPT_ARGS=common.get_args(client_opt),
    CLIENT_OPT_L2=1e-4,  # fixed client-side L2 penalty for this run
    CLIENT_OPT_STRATEGY=client_opt_strategy,
    SERVER_OPT=server_opt,
    SERVER_OPT_ARGS=common.get_args(server_opt),
    SERVER_LEARNING_RATE=server_lr,
    IS_IID_DATA=is_iid,
    BATCH_SIZE=B,
    CLIENT_FRACTION=C,
    N_CLIENTS=NC,
    N_EPOCH_PER_CLIENT=E,
    MAX_ROUNDS=max_rounds,
    IMAGE_NORM="recordwisefull",
    NORM="group",
    INIT="tffed",
    AUG="basicf")
# NOTE(review): BREAK_ROUND appears both above (1500) and here (300) —
# presumably the technical config's value governs early stopping; verify.
config_technical = TorchFederatedLearnerTechnicalConfig(BREAK_ROUND=300, EVAL_ROUND=100)
# Human-readable experiment name: "<server opt>: <server lr> - <strategy> - <client opt>: <client lr>"
name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}"
experiment = Experiment(workspace="federated-learning", project_name=project_name)
common.do_training(experiment, name, config, config_technical)
# TODO: no error was raised for incorrectly named parameters
# Sweep the L2 / weight-decay strength over one decade per step. Each value
# is applied twice: as the client optimizer's L2 penalty (CLIENT_OPT_L2) and
# as the server optimizer's weight_decay.
# Fix: the original grid [1e-3, 1e-2, 1e-1, 1e-1, 1e1] listed 1e-1 twice
# (the identical experiment would have run twice) and skipped 1e0, breaking
# the decade progression — corrected to a clean geometric grid.
for l2 in [1e-3, 1e-2, 1e-1, 1e0, 1e1]:
    s_opt_args = common.get_args(server_opt)
    s_opt_args["weight_decay"] = l2  # server-side decay mirrors the client L2
    config = TorchFederatedLearnerCIFAR100Config(
        BREAK_ROUND=300,
        CLIENT_LEARNING_RATE=client_lr,
        CLIENT_OPT=client_opt,
        CLIENT_OPT_ARGS=common.get_args(client_opt),
        CLIENT_OPT_L2=l2,
        CLIENT_OPT_STRATEGY=client_opt_strategy,
        SERVER_OPT=server_opt,
        SERVER_OPT_ARGS=s_opt_args,
        SERVER_LEARNING_RATE=server_lr,
        IS_IID_DATA=is_iid,
        BATCH_SIZE=B,
        CLIENT_FRACTION=C,
        N_CLIENTS=NC,
        N_EPOCH_PER_CLIENT=E,
        MAX_ROUNDS=max_rounds,
        DL_N_WORKER=0,
        NORM="group",
        # IMAGE_NORM=image_norm,
        INIT="tffed",
    )
    config_technical = TorchFederatedLearnerTechnicalConfig(HIST_SAMPLE=0)
    # Human-readable experiment name: "<server opt>: <server lr> - <strategy> - <client opt>: <client lr>"
    name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}"
    experiment = Experiment(workspace="federated-learning", project_name=project_name)
    common.do_training(experiment, name, config, config_technical)
]  # NOTE(review): closes a list literal begun before this chunk — presumably param_names; verify upstream
# Grid of optimizer combinations. Each tuple is applied positionally onto the
# config via zip(param_names, values) below, so the tuple order must match
# param_names — presumably
# (SERVER_OPT, CLIENT_OPT, SERVER_LEARNING_RATE, CLIENT_LEARNING_RATE, CLIENT_OPT_STRATEGY);
# TODO confirm against the param_names definition above.
config_changes = [
    ("SGD", "SGD", 1, 0.1, "nothing"),
    ("Yogi", "SGD", 0.1, 0.01, "nothing"),
    ("Yogi", "Yogi", 0.1, 0.0001, "avg"),
    ("Yogi", "Yogi", 0.1, 0.0001, "reinit"),
    ("Yogi", "Yogi", 0.1, 0.0001, "nothing"),
]
for values in config_changes:
    # Shared/base configuration; the swept fields are overwritten via setattr below.
    config = TorchFederatedLearnerCIFAR100Config(
        BREAK_ROUND=1500,
        CLIENT_OPT_L2=1e-4,
        IS_IID_DATA=is_iid,
        BATCH_SIZE=B,
        CLIENT_FRACTION=C,
        N_CLIENTS=NC,
        N_EPOCH_PER_CLIENT=E,
        MAX_ROUNDS=max_rounds,
        IMAGE_NORM="recordwisefull",
        NORM="group",
        INIT="tffed",
        AUG="flipf",
    )
    # Overlay this combination's values onto the base config.
    for k, v in zip(param_names, values):
        setattr(config, k, v)
    config_technical = TorchFederatedLearnerTechnicalConfig(
        SAVE_CHP_INTERVALL=5)
    # Human-readable experiment name: "<server opt>: <server lr> - <strategy> - <client opt>: <client lr>"
    name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}"
    experiment = Experiment(workspace="federated-learning", project_name=project_name)
    common.do_training(experiment, name, config, config_technical)
# image_norm = "tflike" # TODO a paraméterek helytelen nevére nem adott hibát for init in ["tffed"]: #TorchInitRepo.get_opt_names(): config = TorchFederatedLearnerCIFAR100Config( BREAK_ROUND=300, CLIENT_LEARNING_RATE=client_lr, CLIENT_OPT=client_opt, # CLIENT_OPT_ARGS=common.get_args(client_opt), # CLIENT_OPT_L2=1e-4, CLIENT_OPT_STRATEGY=client_opt_strategy, SERVER_OPT=server_opt, SERVER_OPT_ARGS={ "betas": (0.9, 0.999), "initial_accumulator": 0.0 }, SERVER_LEARNING_RATE=server_lr, IS_IID_DATA=is_iid, BATCH_SIZE=B, CLIENT_FRACTION=C, N_CLIENTS=NC, N_EPOCH_PER_CLIENT=E, MAX_ROUNDS=max_rounds, DL_N_WORKER=0, NORM="group", # IMAGE_NORM=image_norm, INIT=init, ) config_technical = TorchFederatedLearnerTechnicalConfig(HIST_SAMPLE=0) name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}" experiment = Experiment(workspace="federated-learning", project_name=project_name)