def test_cache_dir_not_exists_creates_dirs(self, mock_makedirs, mock_exists):
  cache_subdir = os.path.join(FLAGS.test_tmpdir, 'test_subdir')
  download.get_compressed_file('http://www.test.org/my/test/file.lzma',
                               cache_subdir)
  mock_makedirs.assert_called_once_with(cache_subdir)
  mock_exists.assert_has_calls([
      mock.call(cache_subdir),
      mock.call(os.path.join(cache_subdir, 'file')),
  ])
def test_cached_file_is_not_fetched(self, mock_makedirs, mock_exists):
  mock_urlopen = mock.mock_open(read_data=lzma.compress(b'test'))
  mock_urlopen.return_value.headers = {}
  with mock.patch.object(urllib.request, 'urlopen', mock_urlopen):
    download.get_compressed_file(
        'http://www.test.org/my/test/file.lzma', cache_dir=FLAGS.test_tmpdir)
  mock_exists.assert_has_calls([
      mock.call(FLAGS.test_tmpdir),
      mock.call(os.path.join(FLAGS.test_tmpdir, 'file'))
  ])
  mock_makedirs.assert_not_called()
  mock_urlopen.assert_not_called()
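# A minimal usage sketch of the function under test (illustrative only, not
# part of the test suite; assumes network access and a writable cache
# directory, and `/tmp/tff_cache` is a hypothetical location). As the tests
# above exercise, `get_compressed_file` fetches an `.lzma`-compressed file,
# decompresses it, caches the result under `cache_dir`, and returns the path
# to the decompressed file; a second call reuses the cached copy.
def _demo_get_compressed_file():
  path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/shakespeare.sqlite.lzma',
      cache_dir='/tmp/tff_cache')
  print('Decompressed file cached at:', path)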
def load_data(
    cache_dir: Optional[str] = None
) -> Tuple[client_data.ClientData, client_data.ClientData]:
  """Loads the federated Shakespeare dataset.

  Downloads and caches the dataset locally. If previously downloaded, tries to
  load the dataset from cache.

  This dataset is derived from the Leaf repository
  (https://github.com/TalwalkarLab/leaf) pre-processing on the works of
  Shakespeare, which is published in "LEAF: A Benchmark for Federated Settings"
  https://arxiv.org/abs/1812.01097.

  The data set consists of 715 users (characters of Shakespeare plays), where
  each example corresponds to a contiguous set of lines spoken by the character
  in a given play.

  Data set sizes:

  -   train: 16,068 examples
  -   test: 2,356 examples

  Rather than holding out specific users, each user's examples are split across
  _train_ and _test_ so that all users have at least one example in _train_ and
  one example in _test_. Characters that had fewer than 2 examples are excluded
  from the data set.

  The `tf.data.Datasets` returned by
  `tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield
  `collections.OrderedDict` objects at each iteration, with the following keys
  and values:

  -   `'snippets'`: a `tf.Tensor` with `dtype=tf.string`, the snippet of
      contiguous text.

  Args:
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, test) where the tuple elements are
    `tff.simulation.datasets.ClientData` objects.
  """
  database_path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/shakespeare.sqlite.lzma',
      cache_dir=cache_dir)
  train_client_data = sql_client_data.SqlClientData(
      database_path, split_name='train').preprocess(_add_parsing)
  test_client_data = sql_client_data.SqlClientData(
      database_path, split_name='test').preprocess(_add_parsing)
  return train_client_data, test_client_data
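# A minimal usage sketch (illustrative, not part of the library; assumes
# network access for the initial download): load the federated Shakespeare
# data and print the first text snippet of the first client, following the
# docstring's description of the yielded `OrderedDict` structure.
def _demo_shakespeare():
  train_data, _ = load_data()
  first_client_id = train_data.client_ids[0]
  dataset = train_data.create_tf_dataset_for_client(first_client_id)
  for example in dataset.take(1):
    print(example['snippets'])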
def test_uncached_file_is_fetched_without_content_length(
    self, mock_makedirs, mock_exists):
  test_data = b'data'
  test_url = 'http://www.test.org/my/test/file.lzma'
  mock_urlopen = mock.mock_open(read_data=lzma.compress(test_data))
  # Do not add content-length headers, ensuring that the Python code
  # doesn't error when the HTTP header is missing.
  mock_urlopen.return_value.headers = {}
  with mock.patch.object(urllib.request, 'urlopen', mock_urlopen):
    path = download.get_compressed_file(test_url, cache_dir=FLAGS.test_tmpdir)
  expected_output_path = os.path.join(FLAGS.test_tmpdir, 'file')
  self.assertEqual(path, expected_output_path)
  mock_exists.assert_has_calls(
      [mock.call(FLAGS.test_tmpdir),
       mock.call(expected_output_path)])
  mock_urlopen.assert_called_once_with(test_url)
  mock_makedirs.assert_not_called()
  self.assertTrue(os.path.exists(expected_output_path))
  with open(expected_output_path, 'rb') as test_file:
    self.assertEqual(test_file.read(), test_data)
def test_uncached_file_is_fetched_with_content_length(
    self, mock_makedirs, mock_exists):
  test_data = b'data'
  test_url = 'http://www.test.org/my/test/file.lzma'
  mock_urlopen = mock.mock_open(read_data=lzma.compress(test_data))
  mock_urlopen.return_value.headers = {
      'content-length': str(len(test_data))
  }
  with mock.patch('urllib.request.urlopen', mock_urlopen):
    path = download.get_compressed_file(test_url, cache_dir=FLAGS.test_tmpdir)
  expected_output_path = os.path.join(FLAGS.test_tmpdir, 'file')
  self.assertEqual(path, expected_output_path)
  mock_exists.assert_has_calls(
      [mock.call(FLAGS.test_tmpdir),
       mock.call(expected_output_path)])
  mock_urlopen.assert_called_once_with(test_url)
  mock_makedirs.assert_not_called()
  self.assertTrue(os.path.exists(expected_output_path))
  with open(expected_output_path, 'rb') as test_file:
    self.assertEqual(test_file.read(), test_data)
def load_data(cache_dir=None): """Loads the federated Stack Overflow dataset. Downloads and caches the dataset locally. If previously downloaded, tries to load the dataset from cache. This dataset is derived from the Stack Overflow Data hosted by kaggle.com and available to query through Kernels using the BigQuery API: https://www.kaggle.com/stackoverflow/stackoverflow. The Stack Overflow Data is licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported License. To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. The data consists of the body text of all questions and answers. The bodies were parsed into sentences, and any user with fewer than 100 sentences was expunged from the data. Minimal preprocessing was performed as follows: 1. Lowercase the text, 2. Unescape HTML symbols, 3. Remove non-ascii symbols, 4. Separate punctuation as individual tokens (except apostrophes and hyphens), 5. Removing extraneous whitespace, 6. Replacing URLS with a special token. In addition the following metadata is available: 1. Creation date 2. Question title 3. Question tags 4. Question score 5. Type ('question' or 'answer') The data is divided into three sets: - Train: Data before 2018-01-01 UTC except the held-out users. 342,477 unique users with 135,818,730 examples. - Held-out: All examples from users with user_id % 10 == 0 (all dates). 38,758 unique users with 16,491,230 examples. - Test: All examples after 2018-01-01 UTC except from held-out users. 204,088 unique users with 16,586,035 examples. The `tf.data.Datasets` returned by `tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield `collections.OrderedDict` objects at each iteration, with the following keys and values, in lexicographic order by key: - `'creation_date'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing the date/time of the question or answer in UTC format. - `'score'`: a `tf.Tensor` with `dtype=tf.int64` and shape [] containing the score of the question. - `'tags'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing the tags of the question, separated by '|' characters. - `'title'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing the title of the question. - `'tokens'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing the tokens of the question/answer, separated by space (' ') characters. - `'type'`: a `tf.Tensor` with `dtype=tf.string` and shape [] containing either the string 'question' or 'answer'. Args: cache_dir: (Optional) directory to cache the downloaded file. If `None`, caches in Keras' default cache directory. Returns: Tuple of (train, held_out, test) where the tuple elements are `tff.simulation.datasets.ClientData` objects. """ database_path = download.get_compressed_file( origin='https://storage.googleapis.com/tff-datasets-public/stackoverflow.sqlite.lzma', cache_dir=cache_dir) train_client_data = sql_client_data.SqlClientData( database_path, 'train').preprocess(_add_proto_parsing) heldout_client_data = sql_client_data.SqlClientData( database_path, 'heldout').preprocess(_add_proto_parsing) test_client_data = sql_client_data.SqlClientData( database_path, 'test').preprocess(_add_proto_parsing) return train_client_data, heldout_client_data, test_client_data
def load_data(cache_dir=None): """Loads a federated version of the CIFAR-100 dataset. The dataset is downloaded and cached locally. If previously downloaded, it tries to load the dataset from cache. The dataset is derived from the [CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The training and testing examples are partitioned across 500 and 100 clients (respectively). No clients share any data samples, so it is a true partition of CIFAR-100. The train clients have string client IDs in the range [0-499], while the test clients have string client IDs in the range [0-99]. The train clients form a true partition of the CIFAR-100 training split, while the test clients form a true partition of the CIFAR-100 testing split. The data partitioning is done using a hierarchical Latent Dirichlet Allocation (LDA) process, referred to as the [Pachinko Allocation Method] (https://people.cs.umass.edu/~mccallum/papers/pam-icml06.pdf) (PAM). This method uses a two-stage LDA process, where each client has an associated multinomial distribution over the coarse labels of CIFAR-100, and a coarse-to-fine label multinomial distribution for that coarse label over the labels under that coarse label. The coarse label multinomial is drawn from a symmetric Dirichlet with parameter 0.1, and each coarse-to-fine multinomial distribution is drawn from a symmetric Dirichlet with parameter 10. Each client has 100 samples. To generate a sample for the client, we first select a coarse label by drawing from the coarse label multinomial distribution, and then draw a fine label using the coarse-to-fine multinomial distribution. We then randomly draw a sample from CIFAR-100 with that label (without replacement). If this exhausts the set of samples with this label, we remove the label from the coarse-to-fine multinomial and renormalize the multinomial distribution. Data set sizes: - train: 500,000 examples - test: 100,000 examples The `tf.data.Datasets` returned by `tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield `collections.OrderedDict` objects at each iteration, with the following keys and values, in lexicographic order by key: - `'coarse_label'`: a `tf.Tensor` with `dtype=tf.int64` and shape [1] that corresponds to the coarse label of the associated image. Labels are in the range [0-19]. - `'image'`: a `tf.Tensor` with `dtype=tf.uint8` and shape [32, 32, 3], corresponding to the pixels of the handwritten digit, with values in the range [0, 255]. - `'label'`: a `tf.Tensor` with `dtype=tf.int64` and shape [1], the class label of the corresponding image. Labels are in the range [0-99]. Args: cache_dir: (Optional) directory to cache the downloaded file. If `None`, caches in Keras' default cache directory. Returns: Tuple of (train, test) where the tuple elements are `tff.simulation.datasets.ClientData` objects. """ database_path = download.get_compressed_file( origin= 'https://storage.googleapis.com/tff-datasets-public/cifar100.sqlite.lzma', cache_dir=cache_dir) train_client_data = sql_client_data.SqlClientData( database_path, 'train').preprocess(_add_proto_parsing) test_client_data = sql_client_data.SqlClientData( database_path, 'test').preprocess(_add_proto_parsing) return train_client_data, test_client_data
def load_data(only_digits=True, cache_dir=None):
  """Loads the Federated EMNIST dataset.

  Downloads and caches the dataset locally. If previously downloaded, tries to
  load the dataset from cache.

  This dataset is derived from the Leaf repository
  (https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
  dataset, grouping examples by writer. Details about Leaf were published in
  "LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.

  *Note*: This dataset does not include some additional preprocessing that
  MNIST includes, such as size-normalization and centering. In the Federated
  EMNIST data, the value of 1.0 corresponds to the background, and 0.0
  corresponds to the color of the digits themselves; this is the *inverse* of
  some MNIST representations, e.g. in
  [tensorflow_datasets](https://github.com/tensorflow/datasets/blob/master/docs/datasets.md#mnist),
  where 0 corresponds to the background color, and 255 represents the color of
  the digit.

  Data set sizes:

  *only_digits=True*: 3,383 users, 10 label classes

  -   train: 341,873 examples
  -   test: 40,832 examples

  *only_digits=False*: 3,400 users, 62 label classes

  -   train: 671,585 examples
  -   test: 77,483 examples

  Rather than holding out specific users, each user's examples are split across
  _train_ and _test_ so that all users have at least one example in _train_ and
  one example in _test_. Writers that had fewer than 2 examples are excluded
  from the data set.

  The `tf.data.Datasets` returned by
  `tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield
  `collections.OrderedDict` objects at each iteration, with the following keys
  and values, in lexicographic order by key:

  -   `'label'`: a `tf.Tensor` with `dtype=tf.int32` and shape [1], the class
      label of the corresponding pixels. Labels [0-9] correspond to the digits
      classes, labels [10-35] correspond to the uppercase classes (e.g., label
      11 is 'B'), and labels [36-61] correspond to the lowercase classes
      (e.g., label 37 is 'b').
  -   `'pixels'`: a `tf.Tensor` with `dtype=tf.float32` and shape [28, 28],
      containing the pixels of the handwritten digit, with values in the range
      [0.0, 1.0].

  Args:
    only_digits: (Optional) whether to only include examples that are from the
      digits [0-9] classes. If `False`, includes lower and upper case
      characters, for a total of 62 class labels.
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, test) where the tuple elements are
    `tff.simulation.datasets.ClientData` objects.
  """
  database_path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/emnist_all.sqlite.lzma',
      cache_dir=cache_dir)
  if only_digits:
    train_client_data = sql_client_data.SqlClientData(
        database_path, 'digits_only_train').preprocess(_add_proto_parsing)
    test_client_data = sql_client_data.SqlClientData(
        database_path, 'digits_only_test').preprocess(_add_proto_parsing)
  else:
    train_client_data = sql_client_data.SqlClientData(
        database_path, 'all_train').preprocess(_add_proto_parsing)
    test_client_data = sql_client_data.SqlClientData(
        database_path, 'all_test').preprocess(_add_proto_parsing)
  return train_client_data, test_client_data
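# A minimal usage sketch (illustrative, not part of the library; assumes
# network access for the initial download): load the digits-only split and
# check one client's example against the documented shapes and ranges.
def _demo_emnist():
  train_data, _ = load_data(only_digits=True)
  dataset = train_data.create_tf_dataset_for_client(train_data.client_ids[0])
  for example in dataset.take(1):
    print(example['pixels'].shape)  # Expected: (28, 28), float32 in [0, 1].
    print(example['label'])         # Digit class in [0, 9].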
def test_non_lzma_extension_errors(self):
  with self.assertRaises(ValueError):
    download.get_compressed_file('http://www.test.org/my/test/file.bz2')
def load_data(split_by_clients=True, cache_dir=None):
  """Loads the Federated CelebA dataset.

  Downloads and caches the dataset locally. If previously downloaded, tries to
  load the dataset from cache.

  This dataset is derived from the
  [LEAF repository](https://github.com/TalwalkarLab/leaf) preprocessing of the
  [CelebA dataset](https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html), grouping
  examples by celebrity id. Details about LEAF were published in
  ["LEAF: A Benchmark for Federated Settings"](https://arxiv.org/abs/1812.01097),
  and details about CelebA were published in
  ["Deep Learning Face Attributes in the Wild"](https://arxiv.org/abs/1411.7766).

  The raw CelebA dataset contains 10,177 unique identities. During LEAF
  preprocessing, all clients with fewer than 5 examples are removed; this
  leaves 9,343 clients.

  The data is available with train and test splits by clients or by examples.
  That is, when split by clients, ~90% of clients are selected for the train
  set, ~10% of clients are selected for test, and all the examples for a given
  user are part of the same data split. When split by examples, each client is
  located in both the train data and the test data, with ~90% of the examples
  on each client selected for train and ~10% of the examples selected for
  test.

  Data set sizes:

  *split_by_clients=True*:

  -   train: 8,408 clients, 180,429 total examples
  -   test: 935 clients, 19,859 total examples

  *split_by_clients=False*:

  -   train: 9,343 clients, 177,457 total examples
  -   test: 9,343 clients, 22,831 total examples

  The `tf.data.Datasets` returned by
  `tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield
  `collections.OrderedDict` objects at each iteration. These objects have a
  key/value pair storing the image of the celebrity:

  -   `'image'`: a `tf.Tensor` with `dtype=tf.int64` and shape [84, 84, 3],
      containing the red/blue/green pixels of the image. Each pixel is a value
      in the range [0, 255].

  The `OrderedDict` objects also contain an additional 40 key/value pairs for
  the celebrity image attributes, each of the format:

  -   `{attribute name}`: a `tf.Tensor` with `dtype=tf.bool` and shape [1],
      set to True if the celebrity has this attribute in the image, or False
      if they don't.

  The attribute names are: 'five_o_clock_shadow', 'arched_eyebrows',
  'attractive', 'bags_under_eyes', 'bald', 'bangs', 'big_lips', 'big_nose',
  'black_hair', 'blond_hair', 'blurry', 'brown_hair', 'bushy_eyebrows',
  'chubby', 'double_chin', 'eyeglasses', 'goatee', 'gray_hair',
  'heavy_makeup', 'high_cheekbones', 'male', 'mouth_slightly_open',
  'mustache', 'narrow_eyes', 'no_beard', 'oval_face', 'pale_skin',
  'pointy_nose', 'receding_hairline', 'rosy_cheeks', 'sideburns', 'smiling',
  'straight_hair', 'wavy_hair', 'wearing_earrings', 'wearing_hat',
  'wearing_lipstick', 'wearing_necklace', 'wearing_necktie', 'young'

  Note: The CelebA dataset may contain potential bias. The
  [fairness indicators TF tutorial](https://www.tensorflow.org/responsible_ai/fairness_indicators/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study)
  goes into detail about several considerations to keep in mind while using
  the CelebA dataset.

  Args:
    split_by_clients: There are 9,343 clients in the federated CelebA dataset
      with 5 or more examples. If this argument is True, clients are divided
      into train and test groups, with 8,408 and 935 clients respectively. If
      this argument is False, the data is divided by examples instead, i.e.,
      all clients participate in both the train and test groups, with ~90% of
      the examples belonging to the train group and the rest belonging to the
      test group.
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of `(train, test)` where the tuple elements are
    `tff.simulation.datasets.ClientData` objects.
  """
  database_path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/celeba.sqlite.lzma',
      cache_dir=cache_dir)
  if split_by_clients:
    train_client_data = sql_client_data.SqlClientData(
        database_path, 'split_by_clients_train').preprocess(_add_proto_parsing)
    test_client_data = sql_client_data.SqlClientData(
        database_path, 'split_by_clients_test').preprocess(_add_proto_parsing)
  else:
    train_client_data = sql_client_data.SqlClientData(
        database_path,
        'split_by_examples_train').preprocess(_add_proto_parsing)
    test_client_data = sql_client_data.SqlClientData(
        database_path,
        'split_by_examples_test').preprocess(_add_proto_parsing)
  return train_client_data, test_client_data
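# A minimal usage sketch (illustrative, not part of the library; assumes
# network access for the initial download): load the split-by-clients version
# and read one image plus two of the 40 boolean attributes documented above.
def _demo_celeba():
  train_data, _ = load_data(split_by_clients=True)
  dataset = train_data.create_tf_dataset_for_client(train_data.client_ids[0])
  for example in dataset.take(1):
    print(example['image'].shape)  # Expected: (84, 84, 3)
    print(example['smiling'], example['wearing_hat'])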