Example #1
0
    def test_emnist_score(self):
        """Checks the classifier-based EMNIST score for fake and real images."""
        # Each case re-loads the trained classifier, matching the original
        # one-call-per-score pattern.
        for images, expected in ((self.fake_images, 1.1598),
                                 (self.real_images, 3.9547)):
            score = eeu.emnist_score(images,
                                     ecm.get_trained_emnist_classifier_model())
            self.assertAllClose(score, expected, rtol=0.0001, atol=0.0001)
Example #2
0
    def test_emnist_frechet_distance(self):
        """Frechet distance is large for real vs. fake, zero for identical sets."""
        # Second operand, expected distance, and tolerance kwargs per case; a
        # distribution compared with itself must yield exactly 0.0.
        cases = ((self.fake_images, 568.6883, dict(rtol=0.0001, atol=0.0001)),
                 (self.real_images, 0.0, {}))
        for second_images, expected_distance, tolerances in cases:
            result = eeu.emnist_frechet_distance(
                self.real_images, second_images,
                ecm.get_trained_emnist_classifier_model())
            self.assertAllClose(result, expected_distance, **tolerances)
def main(argv):
    """Selects EMNIST clients by classifier accuracy and writes them to CSVs."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # Flags: echo every hyperparameter, rendering None as the string 'None'.
    hparam_dict = collections.OrderedDict(
        (name, FLAGS[name].value) for name in hparam_flags)
    for name in hparam_dict:
        if hparam_dict[name] is None:
            hparam_dict[name] = 'None'
    for name, value in hparam_dict.items():
        print('{} : {} '.format(name, value))

    # These flags are probabilities/fractions, so each is capped at 1.0.
    for flag_name in ('invert_imagery_likelihood', 'bad_accuracy_cutoff',
                      'good_accuracy_cutoff'):
        if FLAGS[flag_name].value > 1.0:
            raise ValueError('%s cannot be greater than 1.0' % flag_name)

    # Training datasets.
    client_real_images_train_tff_data = (
        emnist_data_utils.create_real_images_tff_client_data('train'))

    print('There are %d unique clients.' %
          len(client_real_images_train_tff_data.client_ids))

    # Trained classifier model.
    classifier_model = ecm.get_trained_emnist_classifier_model()

    # Filter down to those client IDs that fall within some accuracy cutoff.
    bad_client_ids_inversion_map, good_client_ids_inversion_map = (
        _get_client_ids_meeting_condition(client_real_images_train_tff_data,
                                          FLAGS.bad_accuracy_cutoff,
                                          FLAGS.good_accuracy_cutoff,
                                          FLAGS.invert_imagery_likelihood,
                                          classifier_model))

    print(
        'There are %d unique clients meeting bad accuracy cutoff condition.' %
        len(bad_client_ids_inversion_map))
    print(
        'There are %d unique clients meeting good accuracy cutoff condition.' %
        len(good_client_ids_inversion_map))

    def _dump_csv(path, inversion_map):
        # One row per selected client: [client_id, invert_imagery_value].
        with tf.io.gfile.GFile(path, 'w') as csvfile:
            writer = csv.writer(csvfile)
            for client_id, invert in inversion_map.items():
                writer.writerow([client_id, invert])

    # Save selected client id dictionaries to csv.
    _dump_csv(FLAGS.path_to_save_bad_clients_csv, bad_client_ids_inversion_map)
    _dump_csv(FLAGS.path_to_save_good_clients_csv,
              good_client_ids_inversion_map)

    print('CSV files with selected Federated EMNIST clients have been saved.')
Example #4
0
def main(argv):
    """Trains a federated GAN on (optionally filtered) EMNIST client data.

    Builds the generator/discriminator models, selects the client training
    data according to `FLAGS.filtering`, runs federated GAN training, and
    periodically evaluates/saves generated images.

    Args:
        argv: Command-line arguments; no positional arguments are accepted.

    Raises:
        app.UsageError: If any positional command-line argument is passed.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    logging.set_verbosity(logging.INFO)

    # Flags: echo every hyperparameter, rendering None as the string 'None'.
    hparam_dict = collections.OrderedDict([(name, FLAGS[name].value)
                                           for name in hparam_flags])
    for k in hparam_dict.keys():
        if hparam_dict[k] is None:
            hparam_dict[k] = 'None'
    for k, v in hparam_dict.items():
        print('{} : {} '.format(k, v))

    tff.backends.native.set_local_execution_context(
        default_num_clients=FLAGS.num_clients_per_round)

    # Trained classifier model (used by the eval hook to score samples).
    classifier_model = ecm.get_trained_emnist_classifier_model()

    # GAN Models.
    disc_model_fn, gen_model_fn = _get_gan_network_models(FLAGS.noise_dim)

    # Training datasets: noise inputs for the generator, on server and client.
    server_gen_inputs_dataset = _create_gen_inputs_dataset(
        batch_size=FLAGS.server_train_batch_size, noise_dim=FLAGS.noise_dim)
    client_gen_inputs_dataset = _create_gen_inputs_dataset(
        batch_size=CLIENT_TRAIN_BATCH_SIZE, noise_dim=FLAGS.noise_dim)

    # Choose the real-image client data per the requested filtering scheme;
    # anything other than 'by_user'/'by_example' falls through to unfiltered.
    if FLAGS.filtering == 'by_user':
        client_real_images_train_tff_data = (
            fedu.get_filtered_by_user_client_data_for_training(
                invert_imagery_probability=FLAGS.invert_imagery_probability,
                accuracy_threshold=FLAGS.accuracy_threshold,
                batch_size=CLIENT_TRAIN_BATCH_SIZE))
    elif FLAGS.filtering == 'by_example':
        client_real_images_train_tff_data = (
            fedu.get_filtered_by_example_client_data_for_training(
                invert_imagery_probability=FLAGS.invert_imagery_probability,
                min_num_examples=FLAGS.min_num_examples,
                example_class_selection=FLAGS.example_class_selection,
                batch_size=CLIENT_TRAIN_BATCH_SIZE))
    else:
        client_real_images_train_tff_data = (
            fedu.get_unfiltered_client_data_for_training(
                batch_size=CLIENT_TRAIN_BATCH_SIZE))

    print('There are %d unique clients that will be used for GAN training.' %
          len(client_real_images_train_tff_data.client_ids))

    # Training: GAN Losses and Optimizers.
    gan_loss_fns = gan_losses.WassersteinGanLossFns(
        grad_penalty_lambda=FLAGS.wass_gp_lambda)
    # Use the documented `learning_rate` kwarg; the `lr` alias is deprecated
    # and removed in newer Keras releases.
    disc_optimizer = tf.keras.optimizers.SGD(learning_rate=0.0005)
    gen_optimizer = tf.keras.optimizers.SGD(learning_rate=0.005)

    # Eval datasets.
    gen_inputs_eval_dataset = _create_gen_inputs_dataset(
        batch_size=EVAL_BATCH_SIZE, noise_dim=FLAGS.noise_dim)
    real_images_eval_dataset = _create_real_images_dataset_for_eval()

    # Eval hook: periodically scores/saves generated images during training.
    path_to_output_images = _get_path_to_output_image(FLAGS.root_output_dir,
                                                      FLAGS.exp_name)
    logging.info('path_to_output_images is %s', path_to_output_images)
    eval_hook_fn = _get_emnist_eval_hook_fn(
        FLAGS.exp_name, FLAGS.root_output_dir, hparam_dict, gan_loss_fns,
        gen_inputs_eval_dataset, real_images_eval_dataset,
        FLAGS.num_rounds_per_save_images, path_to_output_images,
        classifier_model)

    # Form the GAN.
    gan = _get_gan(gen_model_fn,
                   disc_model_fn,
                   gan_loss_fns,
                   gen_optimizer,
                   disc_optimizer,
                   server_gen_inputs_dataset,
                   client_real_images_train_tff_data,
                   use_dp=FLAGS.use_dp,
                   dp_l2_norm_clip=FLAGS.dp_l2_norm_clip,
                   dp_noise_multiplier=FLAGS.dp_noise_multiplier,
                   clients_per_round=FLAGS.num_clients_per_round)

    # Training.
    _, tff_time = _train(gan,
                         server_gen_inputs_dataset,
                         client_gen_inputs_dataset,
                         client_real_images_train_tff_data,
                         FLAGS.num_client_disc_train_steps,
                         FLAGS.num_server_gen_train_steps,
                         FLAGS.num_clients_per_round,
                         FLAGS.num_rounds,
                         FLAGS.num_rounds_per_eval,
                         eval_hook_fn,
                         FLAGS.num_rounds_per_checkpoint,
                         output_dir=FLAGS.root_output_dir,
                         exp_name=FLAGS.exp_name)
    logging.info('Total training time was %4.3f seconds.', tff_time)

    print('\nTRAINING COMPLETE.')
def main(argv):
  """Classifies each EMNIST client's examples and saves the splits to CSVs."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Flags: echo every hyperparameter, rendering None as the string 'None'.
  hparam_dict = collections.OrderedDict(
      (name, FLAGS[name].value) for name in hparam_flags)
  for name in hparam_dict:
    if hparam_dict[name] is None:
      hparam_dict[name] = 'None'
  for name, value in hparam_dict.items():
    print('{} : {} '.format(name, value))

  if FLAGS.invert_imagery_likelihood > 1.0:
    raise ValueError('invert_imagery_likelihood cannot be greater than 1.0')

  # Training datasets.
  client_real_images_train_tff_data = (
      emnist_data_utils.create_real_images_tff_client_data('train'))

  print('There are %d unique clients.' %
        len(client_real_images_train_tff_data.client_ids))

  # Trained classifier model.
  classifier_model = ecm.get_trained_emnist_classifier_model()

  # Partition clients and example indices by classification outcome.
  (client_ids_with_correct_examples_map,
   client_ids_with_incorrect_examples_map,
   client_ids_correct_example_indices_map,
   client_ids_incorrect_example_indices_map) = (
       _get_client_ids_and_examples_based_on_classification(
           client_real_images_train_tff_data, FLAGS.min_num_examples,
           FLAGS.invert_imagery_likelihood, classifier_model))

  print('There are %d unique clients with at least %d correct examples.' %
        (len(client_ids_with_correct_examples_map), FLAGS.min_num_examples))
  print('There are %d unique clients with at least %d incorrect examples.' %
        (len(client_ids_with_incorrect_examples_map), FLAGS.min_num_examples))

  def _write_map_to_csv(path, mapping):
    # One row per client: [client_id, value].
    with tf.io.gfile.GFile(path, 'w') as csvfile:
      writer = csv.writer(csvfile)
      for client_id, value in mapping.items():
        writer.writerow([client_id, value])

  # Save client id dictionaries to csv, in the same order as before.
  _write_map_to_csv(FLAGS.path_to_save_clients_with_correct_examples_csv,
                    client_ids_with_correct_examples_map)
  _write_map_to_csv(FLAGS.path_to_save_clients_with_incorrect_examples_csv,
                    client_ids_with_incorrect_examples_map)
  _write_map_to_csv(FLAGS.path_to_save_correct_example_indices_csv,
                    client_ids_correct_example_indices_map)
  _write_map_to_csv(FLAGS.path_to_save_incorrect_example_indices_csv,
                    client_ids_incorrect_example_indices_map)

  print('CSV files with selected Federated EMNIST clients and lists of '
        'classified/misclassified examples have been saved.')
def main(argv):
    """Measures per-client classifier accuracy across Federated EMNIST."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    invert_imagery_likelihood = FLAGS.invert_imagery_likelihood
    print('invert_imagery_likelihood is %s' % invert_imagery_likelihood)
    if invert_imagery_likelihood > 1.0:
        raise ValueError(
            'invert_imagery_likelihood cannot be greater than 1.0')

    # TFF Dataset.
    client_real_images_tff_data = (
        emnist_data_utils.create_real_images_tff_client_data(split='train'))
    print('There are %d unique clients.' %
          len(client_real_images_tff_data.client_ids))

    # EMNIST Classifier.
    classifier_model = ecm.get_trained_emnist_classifier_model()

    accuracy_list = []
    overall_total_count = 0
    overall_correct_count = 0
    for client_id in client_real_images_tff_data.client_ids:
        # Randomly decide, per client, whether to invert the image polarity.
        invert_imagery = np.random.binomial(
            n=1, p=invert_imagery_likelihood) == 1

        # TF Dataset for this client, preprocessed as the classifier expects.
        raw_images_ds = client_real_images_tff_data.create_tf_dataset_for_client(
            client_id)
        images_ds = emnist_data_utils.preprocess_img_dataset(
            raw_images_ds,
            invert_imagery=invert_imagery,
            include_label=True,
            batch_size=None,
            shuffle=False,
            repeat=False)

        # Classify all data on the client and record its accuracy.
        total_count, correct_count = _analyze_classifier(
            images_ds, classifier_model)
        accuracy_list.append(float(correct_count) / float(total_count))
        overall_total_count += total_count
        overall_correct_count += correct_count

    # Calculate histogram of per-client accuracies.
    bin_width = 1
    histogram = _compute_histogram(accuracy_list, bin_width)
    print('\nHistogram:')
    print(histogram.numpy())
    # Sanity check: the bins should sum to the number of clients (3400).
    print('(Histogram sum):')
    print(sum(histogram.numpy()))

    # Calculate percentile values.
    percentile_25, percentile_75 = np.percentile(accuracy_list, q=(25, 75))
    print('\nPercentiles...')
    print('25th Percentile : %f' % percentile_25)
    print('75th Percentile : %f' % percentile_75)

    overall_accuracy = (float(overall_correct_count) /
                        float(overall_total_count))
    print('\nOverall classification success percentage: %d / %d (%f)' %
          (overall_correct_count, overall_total_count, overall_accuracy))