Example #1
def writer_fn(num_round, metrics, partition):
    metrics_writer.print_metrics(num_round, ids, metrics, groups,
                                 num_samples, partition, args.metrics_dir,
                                 '{}_{}'.format(args.metrics_name, 'stat'))
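Note that ids, groups, num_samples, and args are not parameters of writer_fn here; they are captured from an enclosing scope. A minimal sketch of the kind of factory that could build this closure (the factory name and its argument list are assumptions, not part of the original snippet):

def get_stat_writer_function(ids, groups, num_samples, args):
    # Sketch only: assumes metrics_writer is already imported and that the
    # caller supplies the client ids, groups, sample counts, and parsed args.
    def writer_fn(num_round, metrics, partition):
        metrics_writer.print_metrics(num_round, ids, metrics, groups,
                                     num_samples, partition, args.metrics_dir,
                                     '{}_{}'.format(args.metrics_name, 'stat'))
    return writer_fn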
Example #2
def writer_fn(num_round, ids, metrics, groups, num_samples):
    metrics_writer.print_metrics(num_round, ids, metrics, groups,
                                 num_samples, 'train', args.metrics_dir,
                                 '{}_{}'.format(args.metrics_name, 'sys'))
Example #3
def writer_fn(num_round, ids, metrics, groups, num_samples):
    metrics_writer.print_metrics(
        num_round, ids, metrics, groups, num_samples, "train",
        args.metrics_dir, "{}_{}_{}".format(args.metrics_name, "sys",
                                            args.log_rank))
Example #4
def main():

    args = parse_args()

    model_path = '%s/%s.py' % (args.dataset, args.model)
    if not os.path.exists(model_path):
        print('Please specify a valid dataset and a valid model.')
        return
    model_path = '%s.%s' % (args.dataset, args.model)

    print('############################## %s ##############################' %
          model_path)
    mod = importlib.import_module(model_path)
    ClientModel = getattr(mod, 'ClientModel')

    tup = MAIN_PARAMS[args.dataset][args.t]
    num_rounds = args.num_rounds if args.num_rounds != -1 else tup[0]
    eval_every = args.eval_every if args.eval_every != -1 else tup[1]
    clients_per_round = (args.clients_per_round
                         if args.clients_per_round != -1 else tup[2])

    # Suppress tf warnings
    tf.logging.set_verbosity(tf.logging.WARN)

    # Create 2 models
    model_params = MODEL_PARAMS[model_path]
    if args.lr != -1:
        model_params_list = list(model_params)
        model_params_list[0] = args.lr
        model_params = tuple(model_params_list)
    tf.reset_default_graph()
    client_model = ClientModel(*model_params)
    server_model = ServerModel(ClientModel(*model_params))

    # Create server
    server = Server(server_model)

    # Create clients
    clients = setup_clients(args.dataset, client_model)
    print('%d Clients in Total' % len(clients))

    print(
        'Rounds Train_acc_avg  p_10  p_90  Train_loss_avg  p_10  p_90  Test_acc_avg  p_10 p_90  Test_loss_avg  p_10 p_90'
    )
    # Test untrained model on all clients
    stat_metrics = server.test_model(clients)
    all_ids, all_groups, all_num_samples = server.get_clients_test_info(
        clients)
    metrics_writer.print_metrics(0, all_ids, stat_metrics, all_groups,
                                 all_num_samples, STAT_METRICS_PATH)
    print_metrics(stat_metrics, all_num_samples, 0)

    # Simulate training
    for i in range(num_rounds):
        #print('--- Round %d of %d: Training %d Clients ---' % (i+1, num_rounds, clients_per_round))

        # Select clients to train this round
        server.select_clients(online(clients), num_clients=clients_per_round)
        c_ids, c_groups, c_num_samples = server.get_clients_test_info()

        # Simulate server model training on selected clients' data
        sys_metrics = server.train_model(num_epochs=args.num_epochs,
                                         batch_size=args.batch_size,
                                         minibatch=args.minibatch)
        metrics_writer.print_metrics(i, c_ids, sys_metrics, c_groups,
                                     c_num_samples, SYS_METRICS_PATH)

        # Update server model
        server.update_model()

        # Test model on all clients
        if (i + 1) % eval_every == 0 or (i + 1) == num_rounds:
            stat_metrics = server.test_model(clients)
            metrics_writer.print_metrics(i, all_ids, stat_metrics, all_groups,
                                         all_num_samples, STAT_METRICS_PATH)
            print_metrics(stat_metrics, all_num_samples, i + 1)

    # Save server model
    save_model(server_model, args.dataset, args.model)

    # Close models
    server_model.close()
    client_model.close()
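Both main() examples pass online(clients) to select_clients without defining it. In the LEAF simulator this helper is understood to treat every client as available; a one-line sketch under that assumption:

def online(clients):
    # Sketch: assume every client is always reachable during the simulation.
    return clients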
Example #5
def writer_fn(num_round, metrics, partition):
    metrics_writer.print_metrics(
        num_round, ids, metrics, groups, num_samples, partition,
        args.metrics_dir, "{}_{}_{}".format(args.metrics_name, "stat",
                                            args.log_rank))
Example #6
def writer_fn(num_round, ids, metrics, groups, num_samples):
    metrics_writer.print_metrics(num_round, ids, metrics, groups,
                                 num_samples, 'train',
                                 'leaf/models/metrics',
                                 '{}_{}'.format('sys', args.metrics_name))
Example #7
def writer_fn(num_round, metrics, partition):
    metrics_writer.print_metrics(num_round, ids, metrics, groups,
                                 num_samples, partition,
                                 'leaf/models/metrics',
                                 '{}_{}'.format('stat', args.metrics_name))
Example #8
def writer_fn(num_round, metrics, train_or_test):
    metrics_writer.print_metrics(
        num_round, ids, metrics, num_samples, train_or_test,
        args.metrics_dir, '{}_{}'.format(args.metrics_name, 'stat'))
Example #9
def main():

    args = parse_args()

    # Set the random seed if provided (affects client sampling, and batching)
    random.seed(1 + args.seed)
    np.random.seed(12 + args.seed)
    tf.set_random_seed(123 + args.seed)

    model_path = '%s/%s.py' % (args.dataset, args.model)
    if not os.path.exists(model_path):
        print('Please specify a valid dataset and a valid model.')
        return
    model_path = '%s.%s' % (args.dataset, args.model)

    print('############################## %s ##############################' %
          model_path)
    mod = importlib.import_module(model_path)
    ClientModel = getattr(mod, 'ClientModel')

    tup = MAIN_PARAMS[args.dataset][args.t]
    num_rounds = args.num_rounds if args.num_rounds != -1 else tup[0]
    eval_every = args.eval_every if args.eval_every != -1 else tup[1]
    clients_per_round = (args.clients_per_round
                         if args.clients_per_round != -1 else tup[2])

    # Suppress tf warnings
    tf.logging.set_verbosity(tf.logging.WARN)

    # Create 2 models
    model_params = MODEL_PARAMS[model_path]
    if args.lr != -1:
        model_params_list = list(model_params)
        model_params_list[0] = args.lr
        model_params = tuple(model_params_list)

    # Create client model, and share params with server model
    tf.reset_default_graph()
    client_model = ClientModel(args.seed, *model_params)

    # Create server
    server = Server(client_model)

    # Create clients
    train_clients, test_clients = setup_clients(args.dataset, client_model)
    train_ids, train_groups, num_train_samples, _ = server.get_clients_info(
        train_clients)
    test_ids, test_groups, _, num_test_samples = server.get_clients_info(
        test_clients)
    print('Clients in Total: %d train, %d test' %
          (len(train_clients), len(test_clients)))

    # Initial status
    print('--- Round 0 of %d ---' % (num_rounds))
    train_stat_metrics = server.get_train_stats(train_clients)
    print_metrics(train_stat_metrics, num_train_samples, prefix='train_')
    stat_metrics = server.test_model(test_clients)
    metrics_writer.print_metrics(0, test_ids, stat_metrics, test_groups,
                                 num_test_samples, STAT_METRICS_PATH)
    print_metrics(stat_metrics, num_test_samples, prefix='test_')

    # Simulate training
    for i in range(num_rounds):
        print('--- Round %d of %d: Training %d Clients ---' %
              (i + 1, num_rounds, clients_per_round))

        # Select clients to train this round
        server.select_clients(i,
                              online(train_clients),
                              num_clients=clients_per_round)
        c_ids, c_groups, c_num_samples, _ = server.get_clients_info(
            server.selected_clients)

        # Simulate server model training on selected clients' data
        sys_metrics = server.train_model(num_epochs=args.num_epochs,
                                         batch_size=args.batch_size,
                                         minibatch=args.minibatch)

        # Update server model
        server.update_model()
        metrics_writer.print_metrics(i, c_ids, sys_metrics, c_groups,
                                     c_num_samples, SYS_METRICS_PATH)

        # Test model
        if (i + 1) % eval_every == 0 or (i + 1) == num_rounds:
            train_stat_metrics = server.get_train_stats(train_clients)
            print_metrics(train_stat_metrics,
                          num_train_samples,
                          prefix='train_')
            test_stat_metrics = server.test_model(test_clients)
            metrics_writer.print_metrics((i + 1), test_ids, test_stat_metrics,
                                         test_groups, num_test_samples,
                                         STAT_METRICS_PATH)
            print_metrics(test_stat_metrics, num_test_samples, prefix='test_')

    # Save server model
    ckpt_path = os.path.join('checkpoints', args.dataset)
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)
    save_path = server.save_model(
        os.path.join(ckpt_path, '{}.ckpt'.format(args.model)))
    print('Model saved in path: %s' % save_path)

    # Close models
    server.close_model()
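Neither main() shows the local print_metrics(metrics, weights, ...) helper it calls for console output. The header printed in Example #4 (per-metric average with 10th and 90th percentiles) suggests a sample-weighted aggregation along the following lines; this is an illustrative sketch following the keyword signature used in Example #9, not the original implementation:

import numpy as np

def print_metrics(metrics, weights, prefix=''):
    # Sketch: metrics maps client id -> {metric name: value}; weights maps
    # client id -> number of samples. Report a sample-weighted average plus
    # 10th/90th percentiles for each metric.
    ordered_clients = sorted(metrics)
    ordered_weights = [weights[c] for c in ordered_clients]
    metric_names = sorted({name for m in metrics.values() for name in m})
    for name in metric_names:
        values = [metrics[c][name] for c in ordered_clients]
        print('%s%s: avg %g, 10th pct %g, 90th pct %g' %
              (prefix, name,
               np.average(values, weights=ordered_weights),
               np.percentile(values, 10),
               np.percentile(values, 90)))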