Example #1
import argparse

import tensorflow as tf
import flwr as fl

import fashion_mnist  # local helper that provides the partitioned load_data


def main() -> None:
    parser = argparse.ArgumentParser(description="Flower")
    parser.add_argument(
        "--server",
        type=str,
        default="[::]:6000",
        help="gRPC server address (default: [::]:6000)",
    )
    parser.add_argument("--partition",
                        type=int,
                        required=True,
                        help="Partition index (no default)")
    parser.add_argument(
        "--clients",
        type=int,
        required=True,
        help="Number of clients (no default)",
    )
    args = parser.parse_args()

    # Load and compile Keras model (MobileNetV2 expects 3-channel input, so
    # the fashion_mnist helper is assumed to deliver compatible arrays)
    model = tf.keras.applications.MobileNetV2()
    model.compile("adam",
                  "sparse_categorical_crossentropy",
                  metrics=["accuracy"])

    # Load a partition of Fashion-MNIST (the CIFAR-10 call is kept below for reference)
    # (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    xy_train, xy_test = fashion_mnist.load_data(partition=args.partition,
                                                num_partitions=args.clients)

    # Define Flower client (class name kept from the CIFAR-10 version of this example)
    class CifarClient(fl.client.keras_client.KerasClient):
        def get_weights(self):  # type: ignore
            return model.get_weights()

        def fit(self, weights, config):  # type: ignore
            x_train, y_train = xy_train
            model.set_weights(weights)
            model.fit(x_train, y_train,
                      epochs=5)  # batch_size and steps_per_epoch left at their defaults
            # KerasClient.fit returns (weights, num_examples, num_examples_ceil);
            # both counts are len(x_train) here
            return model.get_weights(), len(x_train), len(x_train)

        def evaluate(self, weights, config):  # type: ignore
            x_test, y_test = xy_test
            model.set_weights(weights)
            loss, accuracy = model.evaluate(x_test, y_test)
            return len(x_test), loss, accuracy

    # Start Flower client
    fl.client.start_keras_client(server_address=args.server,
                                 client=CifarClient())
Example #2
def main(argv=None):
    """Run the training experiment"""
    # Read fashion mnist data
    mnist_train, mnist_test = fashion_mnist.load_data()
    config = tf.estimator.RunConfig(
        model_dir=FLAGS.log_dir,
        save_summary_steps=100,
        log_step_count_steps=100,
        save_checkpoints_steps=500,
    )
    # Setup the Estimator
    estimator = tf.estimator.Estimator(model_fn=model_fn, config=config)
    # Start training and validation
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: get_train_inputs(FLAGS.batch_size, mnist_train),
        max_steps=FLAGS.steps)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: get_eval_inputs(FLAGS.batch_size, mnist_test),
        steps=None,
        start_delay_secs=10,  # Start evaluating after 10 sec.
        throttle_secs=30  # Evaluate only every 30 sec
    )
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
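The snippet relies on model_fn, get_train_inputs, and get_eval_inputs being defined elsewhere in the script. A minimal sketch of what the two input functions might look like, assuming the TF 1.x tf.data API (shuffle buffer and structure are illustrative):

def get_train_inputs(batch_size, mnist_train):
    # Shuffled, repeated, batched pipeline over (images, labels)
    dataset = tf.data.Dataset.from_tensor_slices(mnist_train)
    return dataset.shuffle(10000).repeat().batch(batch_size)


def get_eval_inputs(batch_size, mnist_test):
    # Evaluation makes a single unshuffled pass
    return tf.data.Dataset.from_tensor_slices(mnist_test).batch(batch_size)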
Example #3
    t = now()  # now() is assumed to be e.g. datetime.datetime.now
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    print('Training time: %s' % (now() - t))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])


# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

# create two datasets: one with labels below 5 and one with 5 and above
x_train_lt5 = x_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
x_test_lt5 = x_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]

x_train_gte5 = x_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5
x_test_gte5 = x_test[y_test >= 5]
y_test_gte5 = y_test[y_test >= 5] - 5

# define two groups of layers: feature (convolutions) and classification (dense)
feature_layers = [
    Conv2D(filters, kernel_size, padding='valid', input_shape=input_shape),
    Activation('relu'),
    MaxPooling2D(pool_size=pool_size),
    Flatten(),  # list truncated in the source; a minimal conv stack is assumed here
]
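# The split above sets up the transfer experiment: first train the feature
# layers plus a dense head on the < 5 classes, then freeze the features and
# retrain only the head on the >= 5 classes. A minimal sketch of the freezing
# step (the dense head itself is defined elsewhere in the full script):
for layer in feature_layers:
    layer.trainable = False  # keep convolutional weights fixed while transferring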
Example #4

if keras.backend.image_data_format() == 'channels_first':
    input_shape = (1, 28, 28)  # image shape
else:
    input_shape = (28, 28, 1)  # image shape
num_class = 10  # number of classes


# load and pre-process data
def preprocess_input(x):
    return x.reshape((-1, ) + input_shape) / 255.


def preprocess_output(y):
    return keras.utils.to_categorical(y)

(train_x, train_y), (validation_x, validation_y) = fashion_mnist.load_data()
train_x, validation_x = map(preprocess_input, [train_x, validation_x])
train_y, validation_y = map(preprocess_output, [train_y, validation_y])
print('Loading FASHION MNIST data...')
print('train_x shape:', train_x.shape, 'train_y shape:', train_y.shape)
print('validation_x shape:', validation_x.shape,
      'validation_y shape:', validation_y.shape)


# knowledge transfer algorithms
def wider2net_conv2d(teacher_w1, teacher_b1, teacher_w2, new_width, init):
    '''Get initial weights for a wider conv2d layer with more filters,
    by 'random-padding' or 'net2wider'.

    # Arguments
        teacher_w1: `weight` of the conv2d layer to become wider
        teacher_b1: `bias` of the conv2d layer to become wider
        teacher_w2: `weight` of the next connected conv2d layer
        new_width: new `filters` for the wider conv2d layer
        init: initialization algorithm, either 'random-pad' or 'net2wider'
    '''
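The function body is truncated above. As a rough sketch, the simpler 'random-pad' branch could look like the following (hypothetical helper name; a channels_first weight layout of shape (filters, channels, kh, kw) is assumed):

import numpy as np

def _random_pad_conv2d(teacher_w1, teacher_b1, teacher_w2, new_width):
    n = new_width - teacher_w1.shape[0]
    # Append n randomly initialized filters to the widened layer ...
    new_w1 = np.concatenate(
        [teacher_w1, np.random.normal(0, 0.1, (n,) + teacher_w1.shape[1:])],
        axis=0)
    new_b1 = np.concatenate([teacher_b1, np.zeros(n)])
    # ... and n matching random input channels to the next layer
    new_w2 = np.concatenate(
        [teacher_w2,
         np.random.normal(0, 0.1, (teacher_w2.shape[0], n) + teacher_w2.shape[2:])],
        axis=1)
    return new_w1, new_b1, new_w2

The 'net2wider' variant instead replicates existing filters and rescales the next layer's weights so the widened network initially computes the same function.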
Example #5
if __name__ == "__main__":


    font = FontProperties(fname = r"/mnt/c/Windows/Fonts/simsun.ttc", size = 6)

    data_path = '../../data/fashion'
    # Fashion-MNIST class names in Chinese (T-shirt/top, Trouser, Pullover, Dress,
    # Coat, Sandal, Shirt, Sneaker, Bag, Ankle boot); the SimSun font loaded above
    # lets matplotlib render them
    class_names = ['短袖圆领T恤', '裤子', '套衫', '连衣裙', '外套', '凉鞋', '衬衫', '运动鞋', '包', '短靴']

    file_list = ['t10k-images-idx3-ubyte.gz',
            't10k-labels-idx1-ubyte.gz',
            'train-images-idx3-ubyte.gz',
            'train-labels-idx1-ubyte.gz']

    # fetch/verify each archive (load_data is defined elsewhere in this script)
    for i in file_list:
        load_data(data_path, i)

    # training data
    headers, images = extract_train_img_data(os.path.join(data_path, 'train-images-idx3-ubyte.gz'))
    # IDX header fields are big-endian uint32: magic, image count, rows, cols
    header_array = np.frombuffer(headers, dtype = '>u4')
    print(header_array)

    X = np.zeros(shape = (60000, 784))  # 60000 training images, 28*28 = 784 pixels each
    for i in range(header_array[1]):
        X[i] = np.frombuffer(images[i], dtype = '>u1') / 255

    labels = extract_train_label_data(os.path.join(data_path, 'train-labels-idx1-ubyte.gz'))


    # test data
    test_headers, test_images = extract_train_img_data(os.path.join(data_path, 't10k-images-idx3-ubyte.gz'))
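load_data, extract_train_img_data, and extract_train_label_data are helpers defined elsewhere in this script. A minimal sketch of the image extraction, assuming the standard gzipped IDX layout (16-byte big-endian uint32 header followed by raw pixel bytes; the helper name below is illustrative):

import gzip
import numpy as np

def read_idx_images(path):
    with gzip.open(path, 'rb') as f:
        header = f.read(16)  # magic, image count, rows, cols
        magic, count, rows, cols = np.frombuffer(header, dtype='>u4')
        pixels = np.frombuffer(f.read(), dtype='>u1')
        return header, pixels.reshape(count, rows * cols)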
Example #6
import argparse
from typing import Dict, Tuple, cast

import numpy as np
import tensorflow as tf
import flwr as fl

import fashion_mnist  # local helper providing load_data/load_model


def main() -> None:
    parser = argparse.ArgumentParser(description="Flower")
    parser.add_argument(
        "--server_address",
        type=str,
        default="[::]:8080",
        help="gRPC server address (default: [::]:8080)",
    )
    parser.add_argument("--partition",
                        type=int,
                        required=True,
                        help="Partition index (no default)")
    parser.add_argument(
        "--clients",
        type=int,
        required=True,
        help="Number of clients (no default)",
    )
    args = parser.parse_args()

    # Build and compile Keras model (an equivalent inline definition is kept
    # below for reference)
    # model = tf.keras.models.Sequential(
    #     [
    #         tf.keras.layers.Flatten(input_shape=(28, 28)),
    #         tf.keras.layers.Dense(128, activation="relu"),
    #         tf.keras.layers.Dropout(0.2),
    #         tf.keras.layers.Dense(10, activation="softmax"),
    #     ]
    # )
    # model.compile(
    #     optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
    # )

    model = fashion_mnist.load_model()
    xy_train, xy_test = fashion_mnist.load_data(partition=args.partition,
                                                num_partitions=args.clients)

    # Implement a Flower client
    class MnistClient(fl.client.keras_client.KerasClient):
        def __init__(
            self,
            model: tf.keras.Model,
            x_train: np.ndarray,
            y_train: np.ndarray,
            x_test: np.ndarray,
            y_test: np.ndarray,
        ) -> None:
            self.model = model
            self.x_train, self.y_train = x_train, y_train
            self.x_test, self.y_test = x_test, y_test

        def get_weights(self) -> fl.common.Weights:
            return cast(fl.common.Weights, self.model.get_weights())

        def fit(self, weights: fl.common.Weights,
                config: Dict[str, str]) -> Tuple[fl.common.Weights, int, int]:
            # Use provided weights to update local model
            self.model.set_weights(weights)

            # Train the local model using local dataset
            self.model.fit(
                self.x_train,
                self.y_train,
                batch_size=int(config["batch_size"]),
                epochs=int(config["epochs"]),
            )

            # (weights, num_examples, num_examples_ceil) per the KerasClient API
            return self.model.get_weights(), len(self.x_train), len(
                self.x_train)

        def evaluate(self, weights: fl.common.Weights,
                     config: Dict[str, str]) -> Tuple[int, float, float]:
            # Update local model and evaluate on local dataset
            self.model.set_weights(weights)
            loss, accuracy = self.model.evaluate(self.x_test,
                                                 self.y_test,
                                                 batch_size=len(self.x_test),
                                                 verbose=2)

            # Return number of evaluation examples and evaluation result (loss/accuracy)
            return len(self.x_test), float(loss), float(accuracy)

    # Unpack the partitioned Fashion-MNIST data loaded above and normalize it
    (x_train, y_train), (x_test, y_test) = xy_train, xy_test
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # Instantiate client
    client = MnistClient(model, x_train, y_train, x_test, y_test)

    # Start client
    fl.client.start_keras_client(server_address=args.server_address,
                                 client=client)
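MnistClient.fit() reads "batch_size" and "epochs" from the config dict, which the server sends each round. A minimal sketch of the matching server side, assuming the Flower 0.x-era FedAvg strategy and its on_fit_config_fn hook (values are illustrative):

import flwr as fl

def fit_config(rnd: int):
    # Sent to every client selected for training in round `rnd`
    return {"batch_size": str(32), "epochs": str(1)}

strategy = fl.server.strategy.FedAvg(on_fit_config_fn=fit_config)
fl.server.start_server(server_address="[::]:8080",
                       strategy=strategy,
                       config={"num_rounds": 3})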
Example #7

if K.backend() == 'tensorflow':
    raise RuntimeError('This example can only run with the '
                       'Theano backend for the time being, '
                       'because it requires taking the gradient '
                       'of a gradient, which isn\'t '
                       'supported for all TF ops.')

# This example assumes the 'channels_first' data format.
K.set_image_data_format('channels_first')

# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, _), (x_test, _) = fashion_mnist.load_data()

x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# The size of the kernel used for the MaxPooling2D
pool_size = 2
# The total number of feature maps at each layer
nfeats = [8, 16, 32, 64, 128]
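The snippet ends before the model definition, but a list like nfeats typically drives one convolution/pooling block per entry. A rough sketch of such a stack under the 'channels_first' format set above (layer choices are illustrative, not the original model's):

from keras.layers import Conv2D, MaxPooling2D

def conv_stack(x):
    # One convolution + downsampling block per feature-map count
    for nf in nfeats:
        x = Conv2D(nf, 3, padding='same', activation='relu')(x)
        x = MaxPooling2D(pool_size)(x)
    return x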