Example #1
# Imports used by this snippet (not shown in the original; module paths
# assumed from the TFF source tree):
from typing import Optional, Tuple

from tensorflow_federated.python.simulation.datasets import client_data
from tensorflow_federated.python.simulation.datasets import download
from tensorflow_federated.python.simulation.datasets import sql_client_data


def load_data(
    cache_dir: Optional[str] = None
) -> Tuple[client_data.ClientData, client_data.ClientData]:
    """Loads the federated Shakespeare dataset.

  Downloads and caches the dataset locally. If previously downloaded, tries to
  load the dataset from cache.

  This dataset is derived from the Leaf repository's
  (https://github.com/TalwalkarLab/leaf) pre-processing of the works of
  Shakespeare, as published in "LEAF: A Benchmark for Federated Settings"
  (https://arxiv.org/abs/1812.01097).

  The dataset consists of 715 users (characters of Shakespeare plays), where
  each example corresponds to a contiguous set of lines spoken by the
  character in a given play.

  Dataset sizes:

  -   train: 16,068 examples
  -   test: 2,356 examples

  Rather than holding out specific users, each user's examples are split across
  _train_ and _test_ so that all users have at least one example in _train_ and
  one example in _test_. Characters with fewer than two examples are excluded
  from the dataset.

  The `tf.data.Datasets` returned by
  `tff.simulation.ClientData.create_tf_dataset_for_client` will yield
  `collections.OrderedDict` objects at each iteration, with the following keys
  and values:

    -   `'snippets'`: a `tf.Tensor` with `dtype=tf.string`, the snippet of
      contiguous text.

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, test) where the tuple elements are
    `tff.simulation.ClientData` objects.
  """
    database_path = download.get_compressed_file(
        origin='https://storage.googleapis.com/tff-datasets-public/shakespeare.sqlite.lzma',
        cache_dir=cache_dir)
    train_client_data = sql_client_data.SqlClientData(
        database_path, split_name='train').preprocess(_add_parsing)
    test_client_data = sql_client_data.SqlClientData(
        database_path, split_name='test').preprocess(_add_parsing)
    return train_client_data, test_client_data
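
For context, a minimal usage sketch (not part of the original snippet): the
public entry point is tff.simulation.datasets.shakespeare.load_data, and the
client chosen below is simply the first listed ID.

# Usage sketch; assumes tensorflow_federated is installed and eager execution.
import tensorflow_federated as tff

train, test = tff.simulation.datasets.shakespeare.load_data()
first_client = train.client_ids[0]  # Arbitrary choice of client.
for example in train.create_tf_dataset_for_client(first_client).take(1):
  # Each element is a collections.OrderedDict with a single 'snippets' key.
  print(example['snippets'].numpy())
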
Example #2
  def test_split(self, split_name, expected_examples):
    client_data = sql_client_data.SqlClientData(
        test_dataset_filepath(), split_name=split_name)
    # The serializable computation maps a client ID to that client's examples.
    self.assertEqual(
        str(client_data.dataset_computation.type_signature),
        '(string -> string*)')
    dataset = client_data.dataset_computation('test_c')
    # Count the examples served for client 'test_c'.
    actual_examples = dataset.reduce(0, lambda s, x: s + 1)
    self.assertEqual(actual_examples, expected_examples)
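
The snippet references self, so it is presumably a parameterized test method
scraped without its class. A hedged sketch of the harness it might sit in;
the class name, parameter values, and the test_dataset_filepath helper are
assumptions, not the original test file:

# Hypothetical harness; names and example counts are illustrative only.
from absl.testing import parameterized
import tensorflow as tf

class SqlClientDataTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(('train', 'train', 3),
                                  ('test', 'test', 2))
  def test_split(self, split_name, expected_examples):
    ...  # Body as in the example above.
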
Example #3
    def test_deprecation_warning_raised_on_init(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            sql_client_data.SqlClientData(test_dataset_filepath())
            self.assertNotEmpty(w)
            self.assertEqual(w[0].category, DeprecationWarning)
            self.assertRegex(str(w[0].message),
                             'tff.simulation.SqlClientData is deprecated')
Example #4
  def test_split(self, split_name, example_counts):
    client_data = sql_client_data.SqlClientData(
        test_dataset_filepath(), split_name=split_name)
    self.assertEqual(client_data.client_ids,
                     list(example_counts.keys()))
    self.assertEqual(client_data.element_type_structure,
                     tf.TensorSpec(shape=(), dtype=tf.string))
    # The flattened dataset should contain every client's examples.
    expected_examples = sum(example_counts.values())
    dataset = client_data.create_tf_dataset_from_all_clients()
    actual_examples = dataset.reduce(0, lambda s, x: s + 1)
    self.assertEqual(actual_examples, expected_examples)
Example #5
  def test_client_missing(self):
    client_data = sql_client_data.SqlClientData(test_dataset_filepath())
    with self.assertRaisesRegex(ValueError,
                                'not a client in this ClientData'):
      client_data.create_tf_dataset_for_client('missing_client_id')
Example #6
def load_data(cache_dir=None):
    """Loads the federated Stack Overflow dataset.

  Downloads and caches the dataset locally. If previously downloaded, tries to
  load the dataset from cache.

  This dataset is derived from the Stack Overflow Data hosted by kaggle.com and
  available to query through Kernels using the BigQuery API:
  https://www.kaggle.com/stackoverflow/stackoverflow. The Stack Overflow Data
  is licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported
  License. To view a copy of this license, visit
  http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
  Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.

  The data consists of the body text of all questions and answers. The bodies
  were parsed into sentences, and any user with fewer than 100 sentences was
  expunged from the data. Minimal preprocessing was performed as follows:

  1. Lowercase the text,
  2. Unescape HTML symbols,
  3. Remove non-ASCII symbols,
  4. Separate punctuation as individual tokens (except apostrophes and hyphens),
  5. Remove extraneous whitespace,
  6. Replace URLs with a special token.

  In addition, the following metadata is available:

  1. Creation date
  2. Question title
  3. Question tags
  4. Question score
  5. Type ('question' or 'answer')

  The data is divided into three sets:

    -   Train: Data before 2018-01-01 UTC except the held-out users. 342,477
        unique users with 135,818,730 examples.
    -   Held-out: All examples from users with user_id % 10 == 0 (all dates).
        38,758 unique users with 16,491,230 examples.
    -   Test: All examples after 2018-01-01 UTC except from held-out users.
        204,088 unique users with 16,586,035 examples.

  The `tf.data.Datasets` returned by
  `tff.simulation.ClientData.create_tf_dataset_for_client` will yield
  `collections.OrderedDict` objects at each iteration, with the following keys
  and values, in lexicographic order by key:

    -   `'creation_date'`: a `tf.Tensor` with `dtype=tf.string` and shape []
        containing the date/time of the question or answer in UTC format.
    -   `'score'`: a `tf.Tensor` with `dtype=tf.int64` and shape [] containing
        the score of the question.
    -   `'tags'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing
        the tags of the question, separated by '|' characters.
    -   `'title'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing
        the title of the question.
    -   `'tokens'`: a `tf.Tensor` with `dtype=tf.string` and shape []
        containing the tokens of the question/answer, separated by space (' ')
        characters.
    -   `'type'`: a `tf.Tensor` with `dtype=tf.string` and shape []
        containing either the string 'question' or 'answer'.

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, held_out, test) where the tuple elements are
    `tff.simulation.ClientData` objects.
  """
    database_path = download.get_compressed_file(
        origin='https://storage.googleapis.com/tff-datasets-public/stackoverflow.sqlite.lzma',
        cache_dir=cache_dir)
    train_client_data = sql_client_data.SqlClientData(
        database_path, 'train').preprocess(_add_proto_parsing)
    heldout_client_data = sql_client_data.SqlClientData(
        database_path, 'heldout').preprocess(_add_proto_parsing)
    test_client_data = sql_client_data.SqlClientData(
        database_path, 'test').preprocess(_add_proto_parsing)
    return train_client_data, heldout_client_data, test_client_data
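
As with the Shakespeare example, a minimal usage sketch (not part of the
original snippet); the public entry point is
tff.simulation.datasets.stackoverflow.load_data, and the client below is an
arbitrary choice.

# Usage sketch; the download is large, so expect a long first run.
import tensorflow_federated as tff

train, held_out, test = tff.simulation.datasets.stackoverflow.load_data()
sample_id = train.client_ids[0]  # Arbitrary client.
for example in train.create_tf_dataset_for_client(sample_id).take(1):
  # Keys appear in lexicographic order, per the docstring above.
  print(example['type'].numpy(), example['tokens'].numpy())
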
Example #7
def load_data(cache_dir=None):
  """Loads a federated version of the CIFAR-100 dataset.

  Downloads and caches the dataset locally. If previously downloaded, tries to
  load the dataset from cache.

  The dataset is derived from the [CIFAR-100
  dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The training examples
  are partitioned across 500 clients and the testing examples across 100
  clients; no two clients share any examples, so the train clients form a true
  partition of the CIFAR-100 training split and the test clients a true
  partition of the testing split. The train clients have string client IDs in
  the range [0-499], while the test clients have string client IDs in the
  range [0-99].

  The data partitioning is done using a hierarchical Latent Dirichlet
  Allocation (LDA) process, referred to as the
  [Pachinko Allocation Method](https://people.cs.umass.edu/~mccallum/papers/pam-icml06.pdf)
  (PAM).
  This method uses a two-stage LDA process, where each client has an associated
  multinomial distribution over the coarse labels of CIFAR-100, and a
  coarse-to-fine label multinomial distribution for that coarse label over the
  labels under that coarse label. The coarse label multinomial is drawn from a
  symmetric Dirichlet with parameter 0.1, and each coarse-to-fine multinomial
  distribution is drawn from a symmetric Dirichlet with parameter 10. Each
  client has 100 samples. To generate a sample for the client, we first select
  a coarse label by drawing from the coarse label multinomial distribution, and
  then draw a fine label using the coarse-to-fine multinomial distribution. We
  then randomly draw a sample from CIFAR-100 with that label (without
  replacement). If this exhausts the set of samples with this label, we
  remove the label from the coarse-to-fine multinomial and renormalize the
  multinomial distribution.

  Dataset sizes:

  -   train: 500,000 examples
  -   test: 100,000 examples

  The `tf.data.Datasets` returned by
  `tff.simulation.ClientData.create_tf_dataset_for_client` will yield
  `collections.OrderedDict` objects at each iteration, with the following keys
  and values, in lexicographic order by key:

    -   `'coarse_label'`: a `tf.Tensor` with `dtype=tf.int64` and shape [1] that
        corresponds to the coarse label of the associated image. Labels are
        in the range [0-19].
    -   `'image'`: a `tf.Tensor` with `dtype=tf.uint8` and shape [32, 32, 3],
        corresponding to the pixels of the image, with values in the range
        [0, 255].
    -   `'label'`: a `tf.Tensor` with `dtype=tf.int64` and shape [1], the class
        label of the corresponding image. Labels are in the range [0-99].

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, test) where the tuple elements are
    `tff.simulation.ClientData` objects.
  """
  database_path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/cifar100.sqlite.lzma',
      cache_dir=cache_dir)
  train_client_data = sql_client_data.SqlClientData(
      database_path, 'train').preprocess(_add_proto_parsing)
  test_client_data = sql_client_data.SqlClientData(
      database_path, 'test').preprocess(_add_proto_parsing)
  return train_client_data, test_client_data
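
The Pachinko Allocation sampling described in the docstring above can be
sketched directly. This is an illustrative reconstruction under the
docstring's stated parameters, not the code TFF actually used: CIFAR-100 has
20 coarse labels with 5 fine labels each, but the mapping from a
(coarse, fine) draw back to a concrete CIFAR-100 label index is elided here,
and the without-replacement bookkeeping is reduced to a comment.

# Sketch of per-client label sampling via two-stage LDA (PAM), as described
# in the docstring: Dirichlet(0.1) over 20 coarse labels, Dirichlet(10) over
# the 5 fine labels under each coarse label, 100 samples per client.
import numpy as np

def sample_client_labels(rng, num_samples=100, num_coarse=20,
                         fine_per_coarse=5):
  coarse_dist = rng.dirichlet([0.1] * num_coarse)
  fine_dists = [rng.dirichlet([10.0] * fine_per_coarse)
                for _ in range(num_coarse)]
  labels = []
  for _ in range(num_samples):
    coarse = rng.choice(num_coarse, p=coarse_dist)
    fine = rng.choice(fine_per_coarse, p=fine_dists[coarse])
    # Real construction: draw an actual CIFAR-100 image with this fine label
    # without replacement; if the label is exhausted, drop it from
    # fine_dists[coarse] and renormalize. Omitted here for brevity.
    labels.append((coarse, fine))
  return labels

client_labels = sample_client_labels(np.random.default_rng(0))
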