def create_Scenario(request):
    """Build a test ``Scenario`` from a parametrized pytest fixture request.

    ``request.param`` is expected to be a 3-tuple:
    ``(dataset_factory, samples_split_option, corruption_parameters)``.
    The first element is a callable producing the dataset; the other two
    are forwarded verbatim into the scenario configuration.

    Returns the constructed ``Scenario`` with its ``mpl`` attribute set to a
    freshly created multi-partner-learning object.
    """
    dataset = request.param[0]()  # first param is a dataset factory callable
    samples_split_option = request.param[1]
    corruption = request.param[2]
    # One dict literal instead of successive params.update() calls —
    # same key/value pairs, passed unchanged to the Scenario constructor.
    params = {
        "dataset": dataset,
        "partners_count": 3,
        "amounts_per_partner": [0.3, 0.5, 0.2],
        "samples_split_option": samples_split_option,
        "corruption_parameters": corruption,
        "contributivity_methods": ["Shapley values", "Independent scores"],
        "multi_partner_learning_approach": "fedavg",
        "aggregation": "uniform",
        "gradient_updates_per_pass_count": 5,
        "epoch_count": 2,
        "minibatch_count": 2,
        "is_early_stopping": True,
        "init_model_from": "random_initialization",
        "is_quick_demo": False,
    }
    # scenario_.dataset object is created inside the Scenario constructor
    scenario_ = Scenario(**params, scenario_id=0)
    # NOTE(review): accesses the underscore-prefixed attribute
    # `_multi_partner_learning_approach` — presumably the class stored the
    # approach under a private name in this project version; confirm.
    scenario_.mpl = scenario_._multi_partner_learning_approach(
        scenario_, is_save_data=True)
    return scenario_
def create_Scenario(request):
    """Build and fully initialise a test ``Scenario`` from a pytest request.

    ``request.param`` is expected to be a 2-tuple:
    ``(dataset_factory, samples_split_option)``. After constructing the
    ``Scenario``, this also attaches the multi-partner-learning object,
    instantiates the partners, splits the data according to the split type,
    computes batch sizes, and applies data corruption.

    Returns the fully prepared ``Scenario``.
    """
    dataset = request.param[0]()  # first param is a dataset factory callable
    samples_split_option = request.param[1]
    # One dict literal instead of successive params.update() calls —
    # same key/value pairs, passed unchanged to the Scenario constructor.
    params = {
        "dataset": dataset,
        "partners_count": 3,
        "amounts_per_partner": [0.2, 0.5, 0.3],
        "samples_split_option": samples_split_option,
        "corrupted_datasets": ["not_corrupted"] * 3,
        "methods": ["Shapley values", "Independent scores"],
        "multi_partner_learning_approach": "fedavg",
        "aggregation": "uniform",
        "gradient_updates_per_pass_count": 5,
        "epoch_count": 2,
        "minibatch_count": 2,
        "is_early_stopping": True,
        "init_model_from": "random_initialization",
        "is_quick_demo": False,
    }
    full_experiment_name = "unit-test-pytest"
    experiment_path = (Path.cwd()
                       / constants.EXPERIMENTS_FOLDER_NAME
                       / full_experiment_name)
    # scenario_.dataset object is created inside the Scenario constructor
    scenario_ = Scenario(**params,
                         experiment_path=experiment_path,
                         scenario_id=0,
                         repeats_count=1)
    scenario_.mpl = scenario_.multi_partner_learning_approach(
        scenario_, is_save_data=True)
    scenario_.instantiate_scenario_partners()
    # Split data according to scenario and then pre-process successively...
    # ... train data, early stopping validation data, test data
    if scenario_.samples_split_type == "basic":
        scenario_.split_data()
    elif scenario_.samples_split_type == "advanced":
        scenario_.split_data_advanced()
    scenario_.compute_batch_sizes()
    scenario_.data_corruption()
    return scenario_