Example #1
def run():
    # Parse the hyperparameter schedule passed as a JSON string (args.hp)
    experiments_raw = json.loads(args.hp)

    # Expand each schedule entry into all hyperparameter combinations and keep
    # only the requested slice [args.start:args.end]
    hp_dicts = [
        hp for x in experiments_raw for hp in xpm.get_all_hp_combinations(x)
    ][args.start:args.end]
    if args.reverse_order:
        hp_dicts = hp_dicts[::-1]
    experiments = [xpm.Experiment(hyperparameters=hp) for hp in hp_dicts]

    print("Running {} Experiments..\n".format(len(experiments)))
    for xp_count, experiment in enumerate(experiments):
        run_experiment(experiment, xp_count, len(experiments))
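The list comprehension above relies on xpm.get_all_hp_combinations to turn one schedule entry into individual hyperparameter dicts. A minimal sketch of that expansion, assuming each entry maps hyperparameter names to lists of candidate values (the helper below is an illustration, not the repo's implementation):

import itertools

# Assumed behavior only: expand an entry whose values are lists of candidates
# into the cartesian product of concrete hyperparameter dicts.
def get_all_hp_combinations_sketch(entry):
    keys = list(entry.keys())
    value_lists = [v if isinstance(v, list) else [v] for v in entry.values()]
    return [dict(zip(keys, combo)) for combo in itertools.product(*value_lists)]

# e.g. {"n_clients": [10, 20], "local_epochs": [1, 5]} -> 4 hyperparameter dicts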
Example #2
def run():
    # Load the schedule selected by args.schedule from the JSON config file
    with open('federated_learning.json') as data_file:
        experiments_raw = json.load(data_file)[args.schedule]

    # Expand each entry into all hyperparameter combinations, slice the list,
    # and wrap every combination in an Experiment
    hp_dicts = [
        hp for x in experiments_raw for hp in xpm.get_all_hp_combinations(x)
    ][args.start:args.end]
    if args.reverse_order:
        hp_dicts = hp_dicts[::-1]
    experiments = [xpm.Experiment(hyperparameters=hp) for hp in hp_dicts]

    print("Running {} Experiments..\n".format(len(experiments)))
    for xp_count, experiment in enumerate(experiments):
        run_experiment(experiment, xp_count, len(experiments))
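For context, federated_learning.json (also loaded in the next example) presumably maps schedule names to lists of such entries; the content below is a hypothetical illustration, not the repository's actual file:

# Hypothetical shape of federated_learning.json (illustrative values only):
# each top-level key is a schedule name, and each entry's list-valued fields
# are expanded into all combinations by xpm.get_all_hp_combinations.
example_schedule = {
    "main": [
        {
            "dataset": ["mnist"],
            "net": ["lenet_mnist"],
            "n_clients": [10, 20],
            "local_epochs": [1, 5],
        }
    ]
}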
Example #3
print("Torch Version: ", torch.__version__)
device = 'cuda' if torch.cuda.is_available() else 'cpu'

args = parser.parse_args()

# Load the Hyperparameters of all Experiments to be performed and set up the Experiments
with open('federated_learning.json') as data_file:
    experiments_raw = json.load(data_file)[args.schedule]

hp_dicts = [
    hp for x in experiments_raw for hp in xpm.get_all_hp_combinations(x)
][args.start:args.end]
if args.reverse_order:
    hp_dicts = hp_dicts[::-1]
experiments = [xpm.Experiment(hyperparameters=hp) for hp in hp_dicts]


def run_experiments(experiments):
    print("Running {} Experiments..\n".format(len(experiments)))
    for xp_count, xp in enumerate(experiments):
        hp = dhp.get_hp(xp.hyperparameters)
        xp.prepare(hp)
        print(xp)

        # Load the Data and split it among the Clients
        client_loaders, train_loader, test_loader, stats = data_utils.get_data_loaders(hp)

        # Instantiate Clients and Server with Neural Net
        net = getattr(neural_nets, hp['net'])
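The last line resolves the model class from the string stored in hp['net']. A self-contained illustration of that lookup pattern, with a placeholder model class standing in for the repo's neural_nets module:

import torch
import torch.nn as nn

# Placeholder model class, standing in for an entry of the neural_nets module
class lenet_mnist(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(28 * 28, 10)

    def forward(self, x):
        return self.fc(x.flatten(1))

hp = {'net': 'lenet_mnist'}
net_cls = globals()[hp['net']]  # mirrors getattr(neural_nets, hp['net'])
model = net_cls().to('cuda' if torch.cuda.is_available() else 'cpu')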
Example #4
    def experiment_wrapper(cfg, seed, instance, budget, **kwargs):

        # Derive a stable model name from the configuration so repeated runs of
        # the same configuration share a checkpoint file
        model_name = hashlib.sha1(
            json.dumps(cfg._values, sort_keys=True).encode()).hexdigest()

        # Load the already trained model if one is available and only train for
        # the remaining budget (communication rounds)
        files = glob.glob(f"{args.CHECKPOINT_PATH}/*-{model_name}.pth")
        if files:
            oldbudget = int(files[0].split('/')[-1].split('-')[0])
        else:
            oldbudget = 0

        budget = max(0, int(budget) - oldbudget)
        hp = {
            "dataset": "mnist",
            "distill_dataset": "emnist",
            "net": "lenet_mnist",
            "n_clients": 20,
            "classes_per_client": 0.01,
            "communication_rounds": budget,
            "participation_rate": 0.4,
            "local_epochs": cfg["local_epochs"],
            "distill_epochs": cfg["distill_epochs"],
            "n_distill": 100000,
            "local_optimizer": [cfg["local_optimizer"],
                                {"lr": cfg[f"{cfg['local_optimizer'].lower()}_lr"]}],
            "distill_optimizer": ["Adam", {"lr": 0.001}],
            "fallback": cfg["fallback"],
            "lambda_outlier": cfg["lambda_outlier"],
            "lambda_fedprox": cfg["lambda_fedprox"],
            "only_train_final_outlier_layer": False,
            "warmup_type": "constant",
            "mixture_coefficients": {"base": cfg["mixture_coefficients_base"],
                                     "public": 1 - cfg["mixture_coefficients_base"]},
            "batch_size": 256,
            "distill_mode": "logits_weighted_with_deep_outlier_score",
            "aggregation_mode": "FAD+P+S",
            "pretrained": f"{oldbudget}-{model_name}.pth",
            "save_model": f"{budget}-{model_name}.pth",
            "log_frequency": -100,
            "log_path": "trash/",
            "job_id": [''],
        }

        #hp.update(kwargs)

        # Run the single experiment with the remaining budget, suppressing
        # library warnings during the run
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            experiment = xpm.Experiment(hyperparameters=hp)
            return run_experiment(experiment, 0, 1, cmdargs, seed=seed)
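To call experiment_wrapper outside the optimizer loop for a quick check, cfg only needs dict-style access plus a _values attribute (presumably a ConfigSpace Configuration in the original setup); the shim and values below are purely illustrative and cover exactly the keys the wrapper reads:

# Illustrative stand-in only: a config object exposing dict-style access plus a
# _values attribute, covering the keys experiment_wrapper accesses.
class FakeConfig(dict):
    @property
    def _values(self):
        return dict(self)

cfg = FakeConfig({
    "local_epochs": 1,
    "distill_epochs": 1,
    "local_optimizer": "SGD",
    "sgd_lr": 0.1,  # key name follows cfg[f"{cfg['local_optimizer'].lower()}_lr"]
    "fallback": False,
    "lambda_outlier": 1.0,
    "lambda_fedprox": 0.0,
    "mixture_coefficients_base": 0.5,
})

experiment_wrapper(cfg, seed=0, instance=None, budget=10)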