def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        schedule: Optional[str] = 'none',
        beta: Optional[float] = 0.,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        crop_size: Optional[int] = 24,
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_cifar100',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        max_eval_batches: Optional[int] = None,
        **kwargs):
    """Runs an iterative process on the CIFAR-100 classification task.

  This method will load and pre-process the dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    schedule: A string selecting the client selection schedule. If `none`,
      clients are sampled via `training_utils.build_client_datasets_fn` and the
      standard training loop is used; if `loss`, loss-based selection over a
      pool of `loss_pool_size` clients (from `hparam_dict`) is used; any other
      value selects availability-based importance sampling.
    beta: A float passed to the availability-based client selection; only used
      when `schedule` is neither `none` nor `loss`.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """

    crop_shape = (crop_size, crop_size, 3)

    cifar_train, _, fed_test_data = cifar100_dataset.get_federated_cifar100(
        client_epochs_per_round=client_epochs_per_round,
        train_batch_size=client_batch_size,
        crop_shape=crop_shape,
        max_batches_per_client=max_batches_per_client)

    _, cifar_test = cifar100_dataset.get_centralized_datasets(
        train_batch_size=client_batch_size,
        max_test_batches=max_eval_batches,
        crop_shape=crop_shape)

    input_spec = cifar_train.create_tf_dataset_for_client(
        cifar_train.client_ids[0]).element_spec

    model_builder = functools.partial(resnet_models.create_resnet18,
                                      input_shape=crop_shape,
                                      num_classes=NUM_CLASSES)

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(tff_model_fn)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=cifar_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    test_fn = training_utils.build_unweighted_test_fn(
        federated_eval_dataset=fed_test_data,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)
    try:
        var = kwargs['hparam_dict']['var_q_clients']
        q_client = np.load(
            f'/home/monica/AVAIL_VECTORS/q_client_{var}_cifar.npy')
    except Exception:  # e.g. hparam key missing or availability file not found
        logging.info(
            'Could not load q_client - initializing random availabilities')
        q_client = None

    if schedule == 'none':
        client_datasets_fn = training_utils.build_client_datasets_fn(
            train_dataset=cifar_train,
            train_clients_per_round=clients_per_round,
            random_seed=client_datasets_random_seed,
            var_q_clients=kwargs['hparam_dict']['var_q_clients'],
            f_mult=kwargs['hparam_dict']['f_mult'],
            f_intercept=kwargs['hparam_dict']['f_intercept'],
            sine_wave=kwargs['hparam_dict']['sine_wave'],
            use_p=True,
            q_client=q_client,
        )

        training_loop.run(iterative_process=training_process,
                          client_datasets_fn=client_datasets_fn,
                          validation_fn=evaluate_fn,
                          test_fn=test_fn,
                          total_rounds=total_rounds,
                          experiment_name=experiment_name,
                          root_output_dir=root_output_dir,
                          **kwargs)
    elif schedule == 'loss':
        if 'loss_pool_size' in kwargs['hparam_dict'] and kwargs['hparam_dict'][
                'loss_pool_size'] is not None:
            loss_pool_size = kwargs['hparam_dict']['loss_pool_size']
            logging.info(f'Loss pool size: {loss_pool_size}')
            client_datasets_fn = training_utils.build_client_datasets_fn(
                train_dataset=cifar_train,
                train_clients_per_round=loss_pool_size,
                random_seed=client_datasets_random_seed,
                var_q_clients=kwargs['hparam_dict']['var_q_clients'],
                f_mult=kwargs['hparam_dict']['f_mult'],
                f_intercept=kwargs['hparam_dict']['f_intercept'],
                sine_wave=kwargs['hparam_dict']['sine_wave'],
                use_p=True,
                q_client=q_client,
            )
            training_loop_loss.run(iterative_process=training_process,
                                   client_datasets_fn=client_datasets_fn,
                                   validation_fn=evaluate_fn,
                                   test_fn=test_fn,
                                   total_rounds=total_rounds,
                                   total_clients=loss_pool_size,
                                   experiment_name=experiment_name,
                                   root_output_dir=root_output_dir,
                                   **kwargs)
        else:
            raise ValueError('Loss pool size not specified')
    else:
        client_datasets_fn = training_utils.build_availability_client_datasets_fn(
            train_dataset=cifar_train,
            train_clients_per_round=clients_per_round,
            random_seed=client_datasets_random_seed,
            beta=beta,
            var_q_clients=kwargs['hparam_dict']['var_q_clients'],
            f_mult=kwargs['hparam_dict']['f_mult'],
            f_intercept=kwargs['hparam_dict']['f_intercept'],
            sine_wave=kwargs['hparam_dict']['sine_wave'],
            q_client=q_client,
        )
        training_loop_importance.run(iterative_process=training_process,
                                     client_datasets_fn=client_datasets_fn,
                                     validation_fn=evaluate_fn,
                                     test_fn=test_fn,
                                     total_rounds=total_rounds,
                                     experiment_name=experiment_name,
                                     root_output_dir=root_output_dir,
                                     **kwargs)
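
# Usage sketch (illustrative; not part of the original file). A plain FedAvg
# process satisfies the builder contract documented above: it accepts a no-arg
# `model_fn` returning a `tff.learning.Model` and yields a
# `tff.templates.IterativeProcess`. The optimizer settings and hparam values
# below are assumptions; the `hparam_dict` keys are the ones this entry point
# reads when `schedule='none'`, and `hparam_dict` is assumed to be forwarded to
# the training loop via `**kwargs`.
def _example_cifar_run():
    def fedavg_builder(model_fn):
        return tff.learning.build_federated_averaging_process(
            model_fn,
            client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
            server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))

    run_federated(
        iterative_process_builder=fedavg_builder,
        client_epochs_per_round=1,
        client_batch_size=20,
        clients_per_round=10,
        schedule='none',
        total_rounds=100,
        hparam_dict={
            'var_q_clients': 0.25,  # assumed value
            'f_mult': 0.4,          # assumed value
            'f_intercept': 0.5,     # assumed value
            'sine_wave': False,     # assumed value
        })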
Example 2
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        schedule: Optional[str] = 'none',
        beta: Optional[float] = 0.,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        model: Optional[str] = 'cnn',
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_synthetic',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        max_eval_batches: Optional[int] = None,
        alpha: Optional[float] = 0.,
        beta_data: Optional[float] = 0.,
        iid: Optional[int] = 0,
        num_users: Optional[int] = 100,
        **kwargs):
    """Runs an iterative process on the EMNIST character recognition task.

  This method will load and pre-process the dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    schedule: A string selecting the client selection schedule. If `none`,
      clients are sampled via `training_utils.build_client_datasets_fn` and the
      standard training loop is used; if `loss`, loss-based selection over a
      pool of `loss_pool_size` clients (from `hparam_dict`) is used; any other
      value selects availability-based importance sampling.
    beta: A float passed to the availability-based client selection; only used
      when `schedule` is neither `none` nor `loss`.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    model: A string naming the model. Unused by this task; a logistic
      regression model is always constructed.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    alpha: A float passed to the synthetic data generator, controlling
      heterogeneity across clients.
    beta_data: A float passed to the synthetic data generator (as `beta`),
      controlling heterogeneity across clients.
    iid: An integer flag passed to the synthetic data generator; if nonzero,
      data is generated IID across clients.
    num_users: The number of synthetic clients to generate.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """
    logging.info('DATA PARAMS:')
    logging.info(f'             Num Users: {num_users}')
    logging.info(f'             alpha: {alpha}')
    logging.info(f'             beta: {beta_data}')
    logging.info(f'             iid: {iid}')
    train_data, test_data, federated_test = synthetic_dataset.generate_federated_softmax_data(
        batch_size=client_batch_size,
        client_epochs_per_round=client_epochs_per_round,
        test_batch_size=100,
        alpha=alpha,
        beta=beta_data,
        iid=iid,
        num_users=num_users)

    input_spec = train_data.create_tf_dataset_for_client(
        train_data.client_ids[0]).element_spec

    model_builder = functools.partial(create_logistic_regression_model)

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(tff_model_fn)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=test_data,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)
    test_fn = training_utils.build_unweighted_test_fn(
        federated_eval_dataset=federated_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)
    try:
        var = kwargs['hparam_dict']['var_q_clients']
        logging.info(f'Variance: {var}')
        q_client = np.load(
            f'/home/monica/AVAIL_VECTORS/q_client_{var}_synthetic.npy')
    except Exception:  # e.g. hparam key missing or availability file not found
        logging.info(
            'Could not load q_client - initializing random availabilities')
        q_client = None

    if schedule == 'none':
        client_datasets_fn = training_utils.build_client_datasets_fn(
            train_dataset=train_data,
            train_clients_per_round=clients_per_round,
            random_seed=client_datasets_random_seed,
            min_clients=kwargs['hparam_dict']['min_clients'],
            var_q_clients=kwargs['hparam_dict']['var_q_clients'],
            f_mult=kwargs['hparam_dict']['f_mult'],
            f_intercept=kwargs['hparam_dict']['f_intercept'],
            sine_wave=kwargs['hparam_dict']['sine_wave'],
            use_p=True,
            q_client=q_client,
        )
        training_loop.run(iterative_process=training_process,
                          client_datasets_fn=client_datasets_fn,
                          validation_fn=evaluate_fn,
                          test_fn=test_fn,
                          total_rounds=total_rounds,
                          experiment_name=experiment_name,
                          root_output_dir=root_output_dir,
                          **kwargs)
    elif schedule == 'loss':
        if 'loss_pool_size' in kwargs['hparam_dict'] and kwargs['hparam_dict'][
                'loss_pool_size'] is not None:
            loss_pool_size = kwargs['hparam_dict']['loss_pool_size']
            logging.info(f'Loss pool size: {loss_pool_size}')

            client_datasets_fn = training_utils.build_client_datasets_fn(
                train_dataset=train_data,
                train_clients_per_round=loss_pool_size,
                random_seed=client_datasets_random_seed,
                min_clients=kwargs['hparam_dict']['min_clients'],
                var_q_clients=kwargs['hparam_dict']['var_q_clients'],
                f_mult=kwargs['hparam_dict']['f_mult'],
                f_intercept=kwargs['hparam_dict']['f_intercept'],
                sine_wave=kwargs['hparam_dict']['sine_wave'],
                use_p=True,
                q_client=q_client)
            training_loop_loss.run(iterative_process=training_process,
                                   client_datasets_fn=client_datasets_fn,
                                   validation_fn=evaluate_fn,
                                   test_fn=test_fn,
                                   total_rounds=total_rounds,
                                   total_clients=loss_pool_size,
                                   experiment_name=experiment_name,
                                   root_output_dir=root_output_dir,
                                   **kwargs)
        else:
            raise ValueError('Loss pool size not specified')
    else:
        init_p = kwargs['hparam_dict']['initialize_p']
        logging.info(f'Initializing as p = {init_p}')
        client_datasets_fn = training_utils.build_availability_client_datasets_fn(
            train_dataset=train_data,
            train_clients_per_round=clients_per_round,
            beta=beta,
            min_clients=kwargs['hparam_dict']['min_clients'],
            var_q_clients=kwargs['hparam_dict']['var_q_clients'],
            f_mult=kwargs['hparam_dict']['f_mult'],
            f_intercept=kwargs['hparam_dict']['f_intercept'],
            sine_wave=kwargs['hparam_dict']['sine_wave'],
            q_client=q_client,
            initialize_p=init_p,
        )
        training_loop_importance.run(iterative_process=training_process,
                                     client_datasets_fn=client_datasets_fn,
                                     validation_fn=evaluate_fn,
                                     test_fn=test_fn,
                                     total_rounds=total_rounds,
                                     experiment_name=experiment_name,
                                     root_output_dir=root_output_dir,
                                     **kwargs)
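
# Usage sketch (illustrative; not part of the original file). With
# `schedule='loss'`, this entry point expects `loss_pool_size` (plus the other
# keys below) in `hparam_dict` and delegates to `training_loop_loss.run`.
# `fedavg_builder` is assumed to be a builder like the one sketched after the
# first example; all values here are illustrative assumptions.
def _example_synthetic_loss_run(fedavg_builder):
    run_federated(
        iterative_process_builder=fedavg_builder,
        client_epochs_per_round=1,
        client_batch_size=10,
        clients_per_round=10,
        schedule='loss',
        alpha=1.0,
        beta_data=1.0,
        iid=0,
        num_users=100,
        total_rounds=200,
        hparam_dict={
            'loss_pool_size': 30,   # size of the loss-ranked candidate pool (assumed)
            'min_clients': 10,      # assumed value
            'var_q_clients': 0.25,  # assumed value
            'f_mult': 0.4,          # assumed value
            'f_intercept': 0.5,     # assumed value
            'sine_wave': False,     # assumed value
        })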
Example 3
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        sequence_length: Optional[int] = 80,
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_shakespeare',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        max_eval_batches: Optional[int] = None,
        **kwargs):
    """Runs an iterative process on a Shakespeare next character prediction task.

  This method will load and pre-process the dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      a `client_weight_fn`, and returns a `tff.templates.IterativeProcess`. The
      `model_fn` must return a `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    sequence_length: An int specifying the length of the character sequences
      used for prediction.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """

    train_clientdata = shakespeare_dataset.construct_character_level_datasets(
        client_batch_size=client_batch_size,
        client_epochs_per_round=client_epochs_per_round,
        sequence_length=sequence_length,
        max_batches_per_client=max_batches_per_client)

    _, test_dataset = shakespeare_dataset.get_centralized_datasets(
        train_batch_size=client_batch_size,
        max_test_batches=max_eval_batches,
        sequence_length=sequence_length)

    model_builder = functools.partial(create_shakespeare_model,
                                      sequence_length=sequence_length)

    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    # `metrics_builder` is used below but was missing from this snippet. A
    # plain sparse accuracy is assumed here; the upstream project typically
    # masks pad tokens in its accuracy metrics for this task.
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    input_spec = train_clientdata.create_tf_dataset_for_client(
        train_clientdata.client_ids[0]).element_spec

    def client_weight_fn(local_outputs):
        # num_tokens is an int64[1] tensor; to use it as a weight we need a
        # float32 scalar.
        return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(
        tff_model_fn, client_weight_fn=client_weight_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=train_clientdata,
        train_clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=test_dataset,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn,
                      test_fn=evaluate_fn,
                      total_rounds=total_rounds,
                      total_clients=clients_per_round,
                      experiment_name=experiment_name,
                      root_output_dir=root_output_dir,
                      **kwargs)
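
# Usage sketch (illustrative; not part of the original file). Unlike the other
# tasks, this entry point calls the builder with a `client_weight_fn` keyword,
# so the builder must accept it. In TFF releases contemporary with this code,
# `build_federated_averaging_process` takes a `client_weight_fn` argument for
# weighting client updates; the optimizers below are assumed values.
def _example_shakespeare_builder(model_fn, client_weight_fn=None):
    return tff.learning.build_federated_averaging_process(
        model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
        client_weight_fn=client_weight_fn)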
Example 4
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        model: Optional[str] = 'cnn',
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_emnist_cr',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        max_eval_batches: Optional[int] = None,
        **kwargs):
    """Runs an iterative process on the EMNIST character recognition task.

  This method will load and pre-process the dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    model: A string specifying the model used for character recognition.
      Can be one of `cnn` and `2nn`, corresponding to a CNN model and a densely
      connected 2-layer model (respectively).
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """

    emnist_train, _ = emnist_dataset.get_emnist_datasets(
        client_batch_size,
        client_epochs_per_round,
        max_batches_per_client=max_batches_per_client,
        only_digits=False)

    _, emnist_test = emnist_dataset.get_centralized_datasets(
        train_batch_size=client_batch_size,
        max_test_batches=max_eval_batches,
        only_digits=False)

    input_spec = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0]).element_spec

    if model == 'cnn':
        model_builder = functools.partial(
            emnist_models.create_conv_dropout_model, only_digits=False)
    elif model == '2nn':
        model_builder = functools.partial(
            emnist_models.create_two_hidden_layer_model, only_digits=False)
    else:
        raise ValueError(
            'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
                model, EMNIST_MODELS))

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(model_fn=tff_model_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=emnist_train,
        train_clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn,
                      test_fn=evaluate_fn,
                      total_rounds=total_rounds,
                      total_clients=clients_per_round,
                      experiment_name=experiment_name,
                      root_output_dir=root_output_dir,
                      **kwargs)
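
# Usage sketch (illustrative; not part of the original file). This entry point
# selects the architecture with the `model` flag ('cnn' or '2nn') and passes
# any remaining keyword arguments straight to the training loop. The builder
# and all values below are assumptions for illustration.
def _example_emnist_run():
    def fedavg_builder(model_fn):
        return tff.learning.build_federated_averaging_process(
            model_fn,
            client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1))

    run_federated(
        iterative_process_builder=fedavg_builder,
        client_epochs_per_round=1,
        client_batch_size=20,
        clients_per_round=10,
        model='2nn',
        total_rounds=100)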