Example #1
    def train(self, x_train, y_train, x_test, y_test):
        if self.model is None:
            print('Model has not been created yet, run createModel() first.')
            return
        else:
            optimizer = None
            if self.use_tf_privacy:
                optimizer = DPKerasAdamOptimizer(
                    l2_norm_clip=self.params['l2_norm_clip'],
                    noise_multiplier=self.noise_multiplier,
                    learning_rate=self.params['learning_rate'],
                    num_microbatches=self.params['num_microbatches'])
            else:
                # optimizer=keras.optimizers.Adam(0.001)
                optimizer = keras.optimizers.Adam(self.params['learning_rate'])

            self.model.compile(
                optimizer=optimizer,
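                # NOTE: the DP optimizer expects per-example losses
                # (reduction=NONE) for microbatch clipping; see the
                # TF Privacy docs before relying on this reduced loss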
                loss=keras.losses.CategoricalCrossentropy(
                    from_logits=True
                ),  # TODO: Check if from_logits=True is needed here
                metrics=[keras.metrics.CategoricalAccuracy()])

            self.model.fit(x_train,
                           y_train,
                           batch_size=self.params['batch_size'],
                           epochs=self.params['epochs'],
                           validation_split=self.params['validation_split'])
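What the DP hyperparameters above actually buy only becomes visible after privacy accounting: the (epsilon, delta) guarantee depends on the dataset size, batch size, noise_multiplier, and number of epochs. A minimal accounting sketch with TensorFlow Privacy's analysis helper; the module path has moved between tensorflow_privacy releases, and all numeric values below are placeholder assumptions, not values from this example:

    from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy

    # Placeholder values; substitute the real training-set size and the
    # values held in self.params and self.noise_multiplier.
    eps, opt_order = compute_dp_sgd_privacy.compute_dp_sgd_privacy(
        n=60000,               # number of training examples
        batch_size=250,        # params['batch_size']
        noise_multiplier=1.1,  # self.noise_multiplier
        epochs=15,             # params['epochs']
        delta=1e-5)            # target delta, usually << 1 / n
    print(f'DP-SGD guarantee: epsilon = {eps:.2f} at delta = 1e-5')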
Example #2
    def train(self, x_train, y_train, x_test, y_test):
        if self.model is None:
            print('Model has not been created yet, run createModel() first.')
            return
        else:
            optimizer = None
            if self.use_tf_privacy:
                optimizer = DPKerasAdamOptimizer(
                    l2_norm_clip=self.params['l2_norm_clip'],
                    noise_multiplier=self.noise_multiplier,
                    learning_rate=self.params['learning_rate'],
                    num_microbatches=self.params['num_microbatches'])
            else:
                # optimizer=keras.optimizers.Adam(0.001)
                optimizer = keras.optimizers.Adam(self.params['learning_rate'])

            self.model.compile(optimizer=optimizer,
                loss=keras.losses.CategoricalCrossentropy(from_logits=True), # TODO: Check if from_logits=True is needed here
                metrics=[keras.metrics.CategoricalAccuracy()])

            # self.model.fit(x_train, y_train, batch_size=self.params['batch_size'], epochs=self.params['epochs'], validation_split=self.params['validation_split'])

            datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0.1,  # randomly rotate images in the range
                zoom_range=0.1,  # randomly zoom image
                width_shift_range=0.1,  # randomly shift images horizontally
                height_shift_range=0.1,  # randomly shift images vertically
                horizontal_flip=False,  # randomly flip images
                vertical_flip=False)  # randomly flip images
            # batch_size must be passed to datagen.flow(); model.fit()
            # ignores batch_size when the input is a generator
            self.model.fit(datagen.flow(x_train,
                                        y_train,
                                        batch_size=self.params['batch_size']),
                           epochs=self.params['epochs'])
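A pitfall shared by the DP branches in these examples: DPKerasAdamOptimizer reshapes the per-example losses into num_microbatches groups, so the effective batch size must be divisible by num_microbatches or training fails on the first step. A small guard, written against this class's own parameter names, that could run before fit():

    # Assumes batch_size and num_microbatches are both set in self.params,
    # as the code above reads them.
    if self.params['batch_size'] % self.params['num_microbatches'] != 0:
        raise ValueError('batch_size must be a multiple of num_microbatches '
                         'for DP microbatch gradient clipping')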
Example #3
def get_model():
    input_img = tf.keras.Input(
        shape=(width, height, 1), name="Input"
    )
    x = tf.keras.layers.Conv2D(
        64, (3, 3), activation="relu", padding="same", name="Conv1_1"
    )(input_img)
    x = tf.keras.layers.BatchNormalization(name="bn1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
    x = tf.keras.layers.Conv2D(
        128, (3, 3), activation="relu", padding="same", name="Conv2_1"
    )(x)
    x = tf.keras.layers.BatchNormalization(name="bn4")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
    x = tf.keras.layers.Flatten(name="flatten")(x)
    x = tf.keras.layers.Dense(
        n_classes, activation="softmax", name="fc1"
    )(x)
    model = tf.keras.Model(inputs=input_img, outputs=x)

    opt = DPKerasAdamOptimizer(
        l2_norm_clip=diff_priv_config.max_grad_norm,
        noise_multiplier=diff_priv_config.noise_multiplier,
        num_microbatches=num_microbatches,
        learning_rate=l_rate)

    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(
            # need to calculate the loss per sample for the
            # per-sample / per-microbatch gradient clipping
            reduction=tf.losses.Reduction.NONE
        ),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
        optimizer=opt)
    return model
Example #4
def get_model():
    input_img = tf.keras.Input(
        shape=(width, height, 1), name="Input"
    )
    x = tf.keras.layers.Conv2D(
        64, (3, 3), activation="relu", padding="same", name="Conv1_1"
    )(input_img)
    x = tf.keras.layers.BatchNormalization(name="bn1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
    x = tf.keras.layers.Conv2D(
        128, (3, 3), activation="relu", padding="same", name="Conv2_1"
    )(x)
    x = tf.keras.layers.BatchNormalization(name="bn4")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
    x = tf.keras.layers.Flatten(name="flatten")(x)
    x = tf.keras.layers.Dense(
        n_classes, activation="softmax", name="fc1"
    )(x)
    model = tf.keras.Model(inputs=input_img, outputs=x)

    opt = DPKerasAdamOptimizer(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=noise_multiplier,
        num_microbatches=num_microbatches,
        learning_rate=l_rate)

    model.compile(
        # the DP optimizer needs per-example losses for microbatch
        # clipping, so the loss must use reduction=NONE rather than the
        # string alias's default mean reduction
        loss=tf.keras.losses.SparseCategoricalCrossentropy(
            reduction=tf.losses.Reduction.NONE),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
        optimizer=opt)
    return model
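Microbatching makes DP training slow, since one clipped gradient is computed per microbatch. tensorflow_privacy also ships a vectorized variant of the optimizer that is usually much faster and would be a drop-in replacement in both get_model variants; a sketch, with the caveat that the exact module path can differ between releases:

    from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras_vectorized import (
        VectorizedDPKerasAdamOptimizer)

    # Same hyperparameters as above; only the optimizer class changes.
    opt = VectorizedDPKerasAdamOptimizer(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=noise_multiplier,
        num_microbatches=num_microbatches,
        learning_rate=l_rate)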
Example #5
    def fineTune(self, x_train, y_train):
        for i in range(len(self.model.layers) - self.params['num_layers']):
            self.model.layers[i].trainable = False

        # TODO: Check whether this recompile is needed after freezing layers
        self.model.compile(optimizer=self.model.optimizer,
                           loss=self.model.loss,
                           metrics=[keras.metrics.CategoricalAccuracy()])

        for i in range(len(self.model.layers)):
            print(f'Layer: {i} | Trainable: {self.model.layers[i].trainable}')
        self.model.summary()

        optimizer = None
        if self.use_tf_privacy:
            optimizer = DPKerasAdamOptimizer(
                l2_norm_clip=self.params['l2_norm_clip'],
                noise_multiplier=self.noise_multiplier,
                learning_rate=self.params['learning_rate'],
                num_microbatches=self.params['num_microbatches'])
        else:
            # optimizer=keras.optimizers.Adam(0.001)
            optimizer = keras.optimizers.Adam(self.params['learning_rate'])

        self.model.compile(optimizer=optimizer,
                           loss=keras.losses.CategoricalCrossentropy(),
                           metrics=[keras.metrics.CategoricalAccuracy()])

        # self.model.fit(x_train, y_train, batch_size=self.params['batch_size'], epochs=self.params['epochs'], validation_split=self.params['validation_split'])

        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0.1,  # randomly rotate images in the range
            zoom_range=0.1,  # Randomly zoom image
            width_shift_range=0.1,  # randomly shift images horizontally
            height_shift_range=0.1,  # randomly shift images vertically
            horizontal_flip=False,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # batch_size must be passed to datagen.flow(); model.fit() ignores
        # batch_size when the input is a generator
        self.model.fit(datagen.flow(x_train,
                                    y_train,
                                    batch_size=self.params['batch_size']),
                       epochs=self.params['epochs'])

        weights_list_after = []
        for layer in self.model.layers:
            weights_list_after.append(layer.get_weights())
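The method ends by collecting post-training weights, presumably to compare against a snapshot taken before fit(). A minimal sketch of that comparison, assuming a hypothetical weights_list_before collected the same way before training:

    import numpy as np

    # Hypothetical check (not in the original): frozen layers should keep
    # their weights, while the unfrozen head should have changed.
    for i, (before, after) in enumerate(zip(weights_list_before,
                                            weights_list_after)):
        unchanged = all(np.array_equal(b, a) for b, a in zip(before, after))
        print(f'Layer {i}: {"unchanged" if unchanged else "updated"}')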
Example #6
def prepare_learner(
    data_loaders: Tuple[PrefetchDataset, PrefetchDataset, PrefetchDataset],
    steps_per_epoch: int = 100,
    vote_batches: int = 10,
    learning_rate: float = 0.001,
    diff_priv_config: Optional[DiffPrivConfig] = None,
    num_microbatches: int = 4,
) -> KerasLearner:
    """
    Creates new instance of KerasLearner
    :param data_loaders: Tuple of train_loader and test_loader
    :param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches to get vote_accuracy
    :param learning_rate: Learning rate for optimiser
    :return: New instance of KerasLearner
    """

    # 2D Convolutional model for image recognition
    loss = "sparse_categorical_crossentropy"
    optimizer = tf.keras.optimizers.Adam

    input_img = tf.keras.Input(shape=(28, 28, 1), name="Input")
    x = tf.keras.layers.Conv2D(32, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv1_1")(input_img)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
    x = tf.keras.layers.Conv2D(32, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv2_1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
    x = tf.keras.layers.Conv2D(64, (5, 5),
                               activation="relu",
                               padding="same",
                               name="Conv3_1")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2), name="pool3")(x)
    x = tf.keras.layers.Flatten(name="flatten")(x)
    x = tf.keras.layers.Dense(64, activation="relu", name="fc1")(x)
    x = tf.keras.layers.Dense(10, activation="softmax", name="fc2")(x)
    model = tf.keras.Model(inputs=input_img, outputs=x)

    if diff_priv_config is not None:
        opt = DPKerasAdamOptimizer(
            l2_norm_clip=diff_priv_config.max_grad_norm,
            noise_multiplier=diff_priv_config.noise_multiplier,
            num_microbatches=num_microbatches,
            learning_rate=learning_rate)

        model.compile(
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                # need to calculate the loss per sample for the
                # per-sample / per-microbatch gradient clipping
                reduction=tf.losses.Reduction.NONE),
            metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
            optimizer=opt)
    else:
        # 'lr' is a deprecated alias in tf.keras; use learning_rate
        opt = optimizer(learning_rate=learning_rate)
        model.compile(loss=loss,
                      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
                      optimizer=opt)

    learner = KerasLearner(
        model=model,
        train_loader=data_loaders[0],
        vote_loader=data_loaders[1],
        test_loader=data_loaders[2],
        criterion="sparse_categorical_accuracy",
        minimise_criterion=False,
        model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
        model_evaluate_kwargs={"steps": vote_batches},
        diff_priv_config=diff_priv_config,
    )
    return learner
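For context, a hedged usage sketch of prepare_learner. The function only reads max_grad_norm and noise_multiplier from diff_priv_config, so the construction below assumes DiffPrivConfig accepts at least those two fields; the three loader variables are placeholders for real PrefetchDataset objects:

    # train_ds, vote_ds, test_ds: the three PrefetchDataset objects the
    # signature expects (assumed to exist).
    dp_config = DiffPrivConfig(max_grad_norm=1.0, noise_multiplier=1.1)
    learner = prepare_learner(
        data_loaders=(train_ds, vote_ds, test_ds),
        learning_rate=0.001,
        diff_priv_config=dp_config,
        num_microbatches=4)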
Example #7
                                   args.network)
    model = model_module.Network(**args_dict)
    suffix = "{}.bs{}{}{}.ts{}{}.partition={}".format(
        "" if not args.deep_supervision else ".dsup", args.batch_size,
        ".L1{}".format(args.l1) if args.l1 > 0 else "",
        ".L2{}".format(args.l2) if args.l2 > 0 else "", args.timestep,
        ".dp" if args.dp else "", args.partition)
    model.final_name = args.prefix + model.say_name() + suffix
    print("==> model.final_name:", model.final_name)

    # Compile the model
    print("==> compiling the model")
    if args.dp:
        optimizer = DPKerasAdamOptimizer(
            l2_norm_clip=args.l2_norm_clip,
            noise_multiplier=args.noise_multiplier,
            num_microbatches=args.batch_size,
            learning_rate=args.lr,
            beta_1=args.beta_1)
    else:
        optimizer = tf.keras.optimizers.Adam(beta_1=args.beta_1,
                                             learning_rate=args.lr)

    # DP training needs an unreduced, per-example loss for microbatch clipping
    reduction = (tf.losses.Reduction.NONE
                 if args.dp else tf.losses.Reduction.AUTO)
    if args.partition == 'none':
        # other options are: 'mean_squared_error', 'mean_absolute_percentage_error'
        loss_function = tf.keras.losses.MeanSquaredLogarithmicError(
            reduction=reduction)
    else:
        loss_function = tf.keras.losses.SparseCategoricalCrossentropy(
            reduction=reduction)
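The snippet is cut off after the loss is selected; a plausible continuation (an assumption, not taken from the original source, and it presumes model_module.Network behaves like a compilable tf.keras.Model) would wire the pieces together. Note that num_microbatches=args.batch_size means one example per microbatch, i.e. true per-example gradient clipping:

    # Hypothetical continuation: compile with the optimizer and loss
    # selected above.
    model.compile(optimizer=optimizer, loss=loss_function)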