Example #1
def prepare_learner(
    data_loaders: Tuple[PrefetchDataset, PrefetchDataset, PrefetchDataset],
    steps_per_epoch: int = 100,
    vote_batches: int = 10,
    learning_rate: float = 0.001,
) -> KerasLearner:
    """
    Creates a new instance of KerasLearner
    :param data_loaders: Tuple of train_loader, vote_loader and test_loader
    :param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches used to compute the vote score
    :param learning_rate: Learning rate for optimiser
    :return: New instance of KerasLearner
    """
    learner = KerasLearner(
        model=_get_keras_cifar10_conv2D_model(learning_rate),
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
        model_evaluate_kwargs={"steps": vote_batches},
    )
    return learner
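
A hedged usage sketch for the function above: the three PrefetchDatasets can come from batching and prefetching tf.data pipelines. The make_loaders helper, the tensorflow_datasets split percentages and the batch size below are illustrative assumptions, not part of the source.

import tensorflow as tf
import tensorflow_datasets as tfds

def make_loaders(batch_size: int = 64):
    # Hypothetical helper: load CIFAR-10 and build train / vote / test
    # pipelines matching the (train, vote, test) tuple expected above
    train_ds = tfds.load("cifar10", split="train[:80%]", as_supervised=True)
    vote_ds = tfds.load("cifar10", split="train[80%:]", as_supervised=True)
    test_ds = tfds.load("cifar10", split="test", as_supervised=True)

    def prep(ds):
        # scale images to [0, 1] floats, then batch and prefetch
        ds = ds.map(lambda x, y: (tf.cast(x, tf.float32) / 255.0, y))
        return ds.batch(batch_size).prefetch(tf.data.AUTOTUNE)

    return prep(train_ds), prep(vote_ds), prep(test_ds)

learner = prepare_learner(make_loaders())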
Example #2
def prepare_learner(data_loaders: Tuple[PrefetchDataset, PrefetchDataset,
                                        PrefetchDataset],
                    steps_per_epoch: int = 100,
                    vote_batches: int = 10,
                    learning_rate: float = 0.001) -> KerasLearner:
    """
    Creates a new instance of KerasLearner
    :param data_loaders: Tuple of train_loader, vote_loader and test_loader
    :param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches used to compute the vote score
    :param learning_rate: Learning rate for optimiser
    :return: New instance of KerasLearner
    """

    # 2D Convolutional model for image recognition
    loss = "sparse_categorical_crossentropy"
    optimizer = tf.keras.optimizers.Adam

    input_img = tf.keras.Input(shape=(28, 28, 1), name="Input")
    x = tf.keras.layers.Conv2D(32, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv1_1")(input_img)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
    x = tf.keras.layers.Conv2D(32, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv2_1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
    x = tf.keras.layers.Conv2D(64, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv3_1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool3")(x)
    x = tf.keras.layers.Flatten(name="flatten")(x)
    x = tf.keras.layers.Dense(64, activation="relu", name="fc1")(x)
    x = tf.keras.layers.Dense(10, activation="softmax", name="fc2")(x)
    model = tf.keras.Model(inputs=input_img, outputs=x)

    opt = optimizer(learning_rate=learning_rate)
    model.compile(loss=loss,
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
                  optimizer=opt)

    learner = KerasLearner(
        model=model,
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
        model_evaluate_kwargs={"steps": vote_batches},
    )
    return learner
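
A note on the criterion / metric pairing above: Keras reports evaluation results keyed by metric name, and KerasLearner's criterion presumably selects the vote score from those results, with minimise_criterion=False marking higher values as better. A hedged illustration of the mechanism (return_dict is standard Keras; the interpretation of criterion is an assumption about KerasLearner internals):

scores = model.evaluate(data_loaders[1], steps=vote_batches,
                        return_dict=True)
# scores == {"loss": ..., "sparse_categorical_accuracy": ...}, so
# criterion="sparse_categorical_accuracy" names the accuracy entry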
Example #3
def nkl():
    """Returns a Keraslearner"""
    model = get_mock_model()
    dl = get_mock_dataloader()
    vote_dl = get_mock_dataloader()
    nkl = KerasLearner(model,
                       dl,
                       vote_dl,
                       diff_priv_config=DiffPrivConfig(target_epsilon=5,
                                                       target_delta=1e-5,
                                                       max_grad_norm=2,
                                                       noise_multiplier=3))

    return nkl
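
The fixtures get_mock_model and get_mock_dataloader are defined elsewhere in the test suite; below is a minimal illustrative stand-in, assuming any small compiled Keras model and batched tf.data.Dataset will do. Everything here is a sketch, not the real fixtures.

import numpy as np
import tensorflow as tf

def get_mock_model() -> tf.keras.Model:
    # tiny classifier, just enough to exercise KerasLearner
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(2, activation="softmax", input_shape=(4,)),
    ])
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer="adam",
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model

def get_mock_dataloader() -> tf.data.Dataset:
    # 32 random samples with binary labels, batched
    x = np.random.rand(32, 4).astype("float32")
    y = np.random.randint(0, 2, size=(32,))
    return tf.data.Dataset.from_tensor_slices((x, y)).batch(8)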
Example #4
def prepare_resnet_learner(
    data_loaders: Tuple[PrefetchDataset, PrefetchDataset, PrefetchDataset],
    steps_per_epoch: int = 100,
    vote_batches: int = 10,
    learning_rate: float = 0.001,
) -> KerasLearner:
    # RESNET model
    rows = 28
    cols = 28
    channels = 1
    new_channels = 3
    padding = 2
    n_classes = 10

    input_img = tf.keras.Input(shape=(rows, cols, channels), name="Input")
    x = tf.keras.layers.ZeroPadding2D(padding=padding)(input_img)
    x = tf.keras.layers.Flatten()(x)
    # MNIST only has one channel, so duplicate the inputs to give
    # ResNet50 the three channels it expects
    x = tf.keras.layers.RepeatVector(new_channels)(x)
    # Note: this Reshape restores a valid image shape, but it interleaves
    # the repeated values rather than copying the image per channel
    x = tf.keras.layers.Reshape((rows + padding * 2, cols + padding * 2,
                                 new_channels))(x)

    resnet = ResNet50(include_top=False, input_tensor=x)

    x = resnet.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.7)(x)
    x = tf.keras.layers.Dense(n_classes, activation='softmax')(x)

    model = tf.keras.Model(inputs=input_img, outputs=x)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='sparse_categorical_crossentropy',
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    learner = KerasLearner(
        model=model,
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
        model_evaluate_kwargs={"steps": vote_batches},
    )
    return learner
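
As the comments above note, the Flatten/RepeatVector/Reshape route interleaves pixels instead of copying the image into each channel. A hedged alternative that duplicates the grayscale channel exactly, under the same rows/cols/padding assumptions:

x = tf.keras.layers.ZeroPadding2D(padding=padding)(input_img)
# concatenating the padded image with itself on the channel axis gives a
# (rows + 2 * padding, cols + 2 * padding, 3) tensor with identical channels
x = tf.keras.layers.Concatenate(axis=-1)([x, x, x])
resnet = tf.keras.applications.ResNet50(include_top=False, input_tensor=x)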
Example #5
def prepare_learner(
    data_loaders: Tuple[PrefetchDataset, PrefetchDataset, PrefetchDataset],
    steps_per_epoch: int = 100,
    vote_batches: int = 10,
    learning_rate: float = 0.001,
    diff_priv_config: Optional[DiffPrivConfig] = None,
    num_microbatches: int = 4,
) -> KerasLearner:
    """
    Creates new instance of KerasLearner
    :param data_loaders: Tuple of train_loader and test_loader
    :param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches to get vote_accuracy
    :param learning_rate: Learning rate for optimiser
    :return: New instance of KerasLearner
    """

    # 2D Convolutional model for image recognition
    loss = "sparse_categorical_crossentropy"
    optimizer = tf.keras.optimizers.Adam

    input_img = tf.keras.Input(shape=(28, 28, 1), name="Input")
    x = tf.keras.layers.Conv2D(32, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv1_1")(input_img)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
    x = tf.keras.layers.Conv2D(32, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv2_1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
    x = tf.keras.layers.Conv2D(64, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv3_1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool3")(x)
    x = tf.keras.layers.Flatten(name="flatten")(x)
    x = tf.keras.layers.Dense(64, activation="relu", name="fc1")(x)
    x = tf.keras.layers.Dense(10, activation="softmax", name="fc2")(x)
    model = tf.keras.Model(inputs=input_img, outputs=x)

    if diff_priv_config is not None:
        opt = DPKerasAdamOptimizer(
            l2_norm_clip=diff_priv_config.max_grad_norm,
            noise_multiplier=diff_priv_config.noise_multiplier,
            num_microbatches=num_microbatches,
            learning_rate=learning_rate)

        model.compile(
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                # need to calculate the loss per sample for the
                # per-sample / per-microbatch gradient clipping
                reduction=tf.losses.Reduction.NONE),
            metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
            optimizer=opt)
    else:
        opt = optimizer(learning_rate=learning_rate)
        model.compile(loss=loss,
                      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
                      optimizer=opt)

    learner = KerasLearner(
        model=model,
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
        model_evaluate_kwargs={"steps": vote_batches},
        diff_priv_config=diff_priv_config,
    )
    return learner
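
A hedged usage sketch: the DiffPrivConfig fields below mirror example #3, and the comments follow how prepare_learner feeds them into DPKerasAdamOptimizer above; the concrete values are illustrative only.

dp_config = DiffPrivConfig(
    target_epsilon=5,     # privacy budget
    target_delta=1e-5,    # probability of exceeding the budget
    max_grad_norm=2,      # becomes l2_norm_clip for per-sample clipping
    noise_multiplier=3,   # Gaussian noise scale relative to the clip norm
)

# make_loaders is the hypothetical helper sketched after example #1;
# note that TF Privacy expects the batch size to be divisible by
# num_microbatches
dp_learner = prepare_learner(make_loaders(),
                             diff_priv_config=dp_config,
                             num_microbatches=4)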
Example #6
    opt = tf.keras.optimizers.Adam(learning_rate=l_rate)
    model.compile(loss="sparse_categorical_crossentropy",
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
                  optimizer=opt)
    return model


all_learner_models = []
for i in range(n_learners):
    all_learner_models.append(
        KerasLearner(
            model=get_model(),
            train_loader=train_datasets[i],
            vote_loader=vote_datasets[i],
            test_loader=test_datasets[i],
            criterion="sparse_categorical_accuracy",
            minimise_criterion=False,
            model_evaluate_kwargs={"steps": vote_batches},
        ))

set_equal_weights(all_learner_models)

# Train the model using Collective Learning
results = Results()
results.data.append(initial_result(all_learner_models))

plot = ColearnPlot(score_name=all_learner_models[0].criterion)

for round_index in range(n_rounds):
    results.data.append(
Example #7
        batch_size=batch_size,
        image_size=(width, height),
        color_mode='grayscale'
    ).map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE))
# todo: augmentation (although this seems to be turned off)

all_learner_models = []
for i in range(n_learners):
    model = get_model()
    all_learner_models.append(
        KerasLearner(
            model=model,
            train_loader=train_datasets[i],
            test_loader=test_datasets[i],
            model_fit_kwargs={"steps_per_epoch": steps_per_epoch,
                              # "class_weight": {0: 1, 1: 0.27}
                              },
            model_evaluate_kwargs={"steps": vote_batches},
            criterion="auc",
            minimise_criterion=False
        ))

set_equal_weights(all_learner_models)

results = Results()
# Get initial score
results.data.append(initial_result(all_learner_models))

plot = ColearnPlot(score_name=all_learner_models[0].criterion)

for round_index in range(n_rounds):
Example #8
        learning_rate=l_rate
    )
    model.compile(
        loss=loss,
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
        optimizer=opt)
    return model


all_learner_models = []
for i in range(n_learners):
    all_learner_models.append(KerasLearner(
        model=get_model(),
        train_loader=train_datasets[i],
        test_loader=test_datasets[i],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": 100},
        model_evaluate_kwargs={"steps": vote_batches}
    ))

set_equal_weights(all_learner_models)

results = Results()
# Get initial score
results.data.append(initial_result(all_learner_models))

plot = ColearnPlot(score_name=all_learner_models[0].criterion)

for round_index in range(n_rounds):
    results.data.append(
Example #9
            # need to calculate the loss per sample for the
            # per-sample / per-microbatch gradient clipping
            reduction=tf.losses.Reduction.NONE
        ),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
        optimizer=opt)
    return model


all_learner_models = []
for i in range(n_learners):
    all_learner_models.append(KerasLearner(
        model=get_model(),
        train_loader=train_datasets[i],
        vote_loader=test_datasets[i],
        test_loader=test_datasets[i],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_evaluate_kwargs={"steps": vote_batches},
        diff_priv_config=diff_priv_config
    ))

set_equal_weights(all_learner_models)

results = Results()
# Get initial score
results.data.append(initial_result(all_learner_models))

plot = ColearnPlot(score_name=all_learner_models[0].criterion)

for round_index in range(n_rounds):
    results.data.append(
Example #10
    train_dataset.shard(num_shards=n_learners, index=i).batch(batch_size)
    for i in range(n_learners)
]
test_datasets = [
    test_dataset.shard(num_shards=n_learners, index=i).batch(batch_size)
    for i in range(n_learners)
]

all_learner_models = []
for i in range(n_learners):
    model = get_model()
    all_learner_models.append(
        KerasLearner(
            model=model,
            train_loader=train_datasets[i],
            test_loader=test_datasets[i],
            model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
            model_evaluate_kwargs={"steps": vote_batches},
        ))

set_equal_weights(all_learner_models)

results = Results()
# Get initial score
results.data.append(initial_result(all_learner_models))

plot = ColearnPlot(score_name="loss")

for round_index in range(n_rounds):
    results.data.append(
        collective_learning_round(all_learner_models, vote_threshold,
Example #11
def nkl():
    """Returns a KerasLearner"""
    model = get_mock_model()
    dl = get_mock_dataloader()
    nkl = KerasLearner(model, dl)
    return nkl
Example #12
    test_dataset = test_datagen.flow_from_directory(
        test_data_folders[i + n_learners],
        target_size=(width, height),
        batch_size=batch_size,
        color_mode='grayscale',
        class_mode='binary')
    test_datasets.append(test_dataset)

all_learner_models = []
for i in range(n_learners):
    model = get_model()
    all_learner_models.append(
        KerasLearner(model=model,
                     train_loader=train_datasets[i],
                     vote_loader=vote_datasets[i],
                     test_loader=test_datasets[i],
                     model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
                     model_evaluate_kwargs={"steps": vote_batches},
                     criterion="auc",
                     minimise_criterion=False))

set_equal_weights(all_learner_models)

results = Results()
# Get initial score
results.data.append(initial_result(all_learner_models))

plot = ColearnPlot(score_name=all_learner_models[0].criterion)

for round_index in range(n_rounds):
    result = collective_learning_round(all_learner_models, vote_threshold,
                                       round_index)
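
Example #12 stops after computing result; by the pattern in examples #6, #8 and #10, the loop presumably continues by appending each round's result. A minimal hedged completion (anything beyond the append, such as per-round plotting via ColearnPlot, is an assumption and should be checked against the colearn source):

for round_index in range(n_rounds):
    result = collective_learning_round(all_learner_models, vote_threshold,
                                       round_index)
    results.data.append(result)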