    def setUp(self):
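        # load the 'example' EMNIST client data with both mask ratios at
        # 0.0, i.e. nothing is masked out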
        self.train_set, _ = dta.get_client_data(
            'emnist',
            'example',
            {
                'supervised': 0.0,
                'unsupervised': 0.0
            },
        )

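        # shrink the client data for a fast test (presumably 2 clients
        # with 8 examples each)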
        self.train_set = dta.get_sample_client_data(self.train_set, 2, 8)

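        # both models share a single hyperparameter dict; the chained
        # assignment binds both names to the same object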
        classifier_ph = autoencoder_ph = {
            'dataset': 'emnist',
            'optimizer': 'SGD',
            'learning_rate': 10.0
        }

        self.classifier = mdl.DenseSupervisedModel(classifier_ph)
        self.autoencoder = mdl.DenseAutoencoderModel(autoencoder_ph)

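        # one DataLoader per model, each wired to that model's EMNIST
        # preprocessing method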
        self.dataloader_classifier = dta.DataLoader(
            self.classifier.preprocess_emnist,
            num_epochs=1,
            shuffle_buffer=500,
            batch_size=2,
            learning_env='federated')

        self.dataloader_autoencoder = dta.DataLoader(
            self.autoencoder.preprocess_emnist,
            num_epochs=1,
            shuffle_buffer=500,
            batch_size=2,
            learning_env='federated')
Example #2
    def setUp(self):
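        # hyperparameters for the self-supervised rotation model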
        ph = {'optimizer': 'SGD', 'learning_rate': 10.0, 'dataset': 'cifar100'}

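        # the model wrapper exposes one preprocessing method per dataset;
        # look it up by name from the 'dataset' hyperparameter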
        keras_model_fn = mdl.SimpleRotationSelfSupervisedModel(ph)
        preprocess_fn = getattr(keras_model_fn,
                                'preprocess_{}'.format(ph['dataset']))

        dataloader = dta.DataLoader(preprocess_fn,
                                    num_epochs=1,
                                    shuffle_buffer=1,
                                    batch_size=20,
                                    learning_env='federated')

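        # load the full (unsampled) CIFAR-100 client data with no masking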
        train_client_data, _ = dta.get_client_data(ph['dataset'],
                                                   'example', {
                                                       'supervised': 0.0,
                                                       'unsupervised': 0.0
                                                   },
                                                   sample_client_data=False)

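        # bind the sample batch into the TFF model constructor, presumably
        # so TFF can infer the model's input spec from it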
        sample_batch = dataloader.get_sample_batch(train_client_data)
        model_fn = functools.partial(keras_model_fn.create_tff_model_fn,
                                     sample_batch)

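        # build and initialize the federated averaging process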
        iterative_process = tff.learning.build_federated_averaging_process(
            model_fn)
        state = iterative_process.initialize()

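        # run a single round of federated training on the first five clients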
        sample_clients = train_client_data.client_ids[:5]
        federated_train_data = dataloader.make_federated_data(
            train_client_data, sample_clients)

        state, _ = iterative_process.next(state, federated_train_data)

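        # rebuild a Keras model and copy in the trained federated weights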
        self.old_model = keras_model_fn()
        self.old_model.build(sample_batch)
        tff.learning.assign_weights_to_keras_model(self.old_model, state.model)

        self.tmp_dir = 'tests/tmp/'
        if not os.path.isdir(self.tmp_dir):
            os.mkdir(self.tmp_dir)

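        # round-trip the weights through disk: save from the TFF state,
        # then reload into a fresh model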
        self.model_fp = os.path.join(self.tmp_dir, 'model.h5')
        keras_model_fn.save_model_weights(self.model_fp, state, sample_batch)

        self.new_model = keras_model_fn.load_model_weights(self.model_fp)

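        # warm-start a supervised model from the saved self-supervised
        # weights via 'pretrained_model_fp'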
        ph = {
            'optimizer': 'SGD',
            'learning_rate': 10.0,
            'dataset': 'cifar100',
            'pretrained_model_fp': self.model_fp
        }

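        # instantiating the wrapper and then calling it yields the
        # underlying Keras model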
        self.transfer_model = mdl.SimpleRotationSupervisedModel(ph)()
Example #3
    def setUp(self):
        self.train_set, _ = dta.get_client_data(
            'emnist',
            'example',
            {
                'supervised': 0.0,
                'unsupervised': 0.0
            },
        )

        self.train_set = dta.get_sample_client_data(self.train_set, 2, 8)

        self.dataloader_classifier = dta.DataLoader(
            dta.preprocess_classifier,
            num_epochs=1,
            shuffle_buffer=500,
            batch_size=2)

        self.dataloader_autoencoder = dta.DataLoader(
            dta.preprocess_autoencoder,
            num_epochs=1,
            shuffle_buffer=500,
            batch_size=2)
Example #4
    def run(self):
        run_dir = os.path.join(
            self.ph['log_dir'],
            'run_{}'.format(self.ph['curr_run_number']))

        # set up tensorboard summary writer scope
        with tf.summary.create_file_writer(run_dir).as_default():
            hp.hparams(self.ph.get_hparams())

            # data loading
            train_client_data, test_dataset = dta.get_client_data(
                dataset_name=self.ph['dataset'],
                mask_by=self.ph['mask_by'],
                mask_ratios={
                    'unsupervised': self.ph['unsupervised_mask_ratio'],
                    'supervised': self.ph['supervised_mask_ratio']
                },
                sample_client_data=self.ph['sample_client_data'])
            test_dataset = self.dataloader.preprocess_dataset(test_dataset)

            sample_batch = self.dataloader.get_sample_batch(train_client_data)
            model_fn = functools.partial(
                self.keras_model_fn.create_tff_model_fn, sample_batch)

            # federated training
            iterative_process = tff.learning.build_federated_averaging_process(
                model_fn)
            state = iterative_process.initialize()

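            # each round, sample up to num_clients_per_round clients without
            # replacement and run one round of federated averaging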
            for round_num in range(self.num_rounds):
                sample_clients = np.random.choice(
                    train_client_data.client_ids,
                    size=min(self.num_clients_per_round,
                             len(train_client_data.client_ids)),
                    replace=False)
                federated_train_data = self.dataloader.make_federated_data(
                    train_client_data, sample_clients)
                state, metrics = iterative_process.next(
                    state, federated_train_data)

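                # periodic logging; metrics[0] and metrics[1] are logged as
                # train accuracy and train loss respectively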
                if not round_num % self.log_every:
                    print('\nround {:2d}, metrics={}'.format(
                        round_num, metrics))
                    tf.summary.scalar('train_accuracy',
                                      metrics[0],
                                      step=round_num)
                    tf.summary.scalar('train_loss', metrics[1], step=round_num)

                    test_loss, test_accuracy = self.evaluate_central(
                        test_dataset, state)
                    tf.summary.scalar('test_accuracy',
                                      test_accuracy,
                                      step=round_num)
                    tf.summary.scalar('test_loss', test_loss, step=round_num)

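            # log the final round's metrics once training completes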
            print('\nround {:2d}, metrics={}'.format(round_num, metrics))
            tf.summary.scalar('train_accuracy', metrics[0], step=round_num)
            tf.summary.scalar('train_loss', metrics[1], step=round_num)

            test_loss, test_accuracy = self.evaluate_central(
                test_dataset, state)
            tf.summary.scalar('test_accuracy', test_accuracy, step=round_num)
            tf.summary.scalar('test_loss', test_loss, step=round_num)

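        # persist the final global model weights for this run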
        model_fp = os.path.join(run_dir, self.ph['model_fp'])
        self.keras_model_fn.save_model_weights(model_fp, state)
        return
Example #5
    def run(self):
        run_dir = os.path.join(
            self.ph['log_dir'],
            'run_{}'.format(self.ph['curr_run_number']))

        # set up tensorboard summary writer scope
        with tf.summary.create_file_writer(run_dir).as_default():
            hp.hparams(self.ph.get_hparams())

            # data loading
            train_client_data, test_dataset = dta.get_client_data(
                dataset_name=self.ph['dataset'],
                mask_by=self.ph['mask_by'],
                mask_ratios={
                    'unsupervised': self.ph['unsupervised_mask_ratio'],
                    'supervised': self.ph['supervised_mask_ratio']
                },
                sample_client_data=self.ph['sample_client_data'])
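            # pool every client's examples into one centralized dataset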
            train_dataset = (
                train_client_data.create_tf_dataset_from_all_clients())
            train_dataset = self.dataloader.preprocess_dataset(train_dataset)
            test_dataset = self.dataloader.preprocess_dataset(test_dataset)

            # centralized training
            model = self.keras_model_fn()

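            # train for one pass over the pooled dataset per epoch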
            for epoch in range(self.num_epochs):
                model.fit(train_dataset)

                if not epoch % self.log_every:
                    train_loss, train_accuracy = model.evaluate(train_dataset)
                    tf.summary.scalar('train_accuracy',
                                      train_accuracy,
                                      step=epoch)
                    tf.summary.scalar('train_loss', train_loss, step=epoch)

                    test_loss, test_accuracy = model.evaluate(test_dataset)
                    tf.summary.scalar('test_accuracy',
                                      test_accuracy,
                                      step=epoch)
                    tf.summary.scalar('test_loss', test_loss, step=epoch)
                    print(
                        '\nepoch {:2d}, train accuracy={} train loss={} test accuracy={} test loss={}'
                        .format(epoch, train_accuracy, train_loss,
                                test_accuracy, test_loss))

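            # final evaluation and logging after the last epoch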
            train_loss, train_accuracy = model.evaluate(train_dataset)
            tf.summary.scalar('train_accuracy', train_accuracy, step=epoch)
            tf.summary.scalar('train_loss', train_loss, step=epoch)

            test_loss, test_accuracy = model.evaluate(test_dataset)
            tf.summary.scalar('test_accuracy', test_accuracy, step=epoch)
            tf.summary.scalar('test_loss', test_loss, step=epoch)
            print(
                '\nepoch {:2d}, train accuracy={} train loss={} test accuracy={} test loss={}'
                .format(epoch, train_accuracy, train_loss, test_accuracy,
                        test_loss))

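        # save the trained weights under this run's log directory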
        model_fp = os.path.join(run_dir, self.ph['model_fp'])
        model.save_weights(model_fp)
        return