def build_network(self, epsilon=0.04):
    """Define the FastEstimator network flow for adversarial training.

    The model is run on the clean batch, an FGSM adversarial batch is
    generated from the clean cross-entropy loss, and the model is updated
    on the average of the clean and adversarial losses.

    Args:
        epsilon: Step size of the FGSM perturbation. Defaults to 0.04,
            matching the previously hard-coded value.

    Returns:
        An ``fe.Network`` wiring the ops described above.
    """
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=self.model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="base_ce"),
        # Adversarial example from the clean-loss gradient w.r.t. "x"
        FGSM(data="x", loss="base_ce", outputs="x_adverse", epsilon=epsilon),
        ModelOp(model=self.model, inputs="x_adverse", outputs="y_pred_adv"),
        CrossEntropy(inputs=("y_pred_adv", "y"), outputs="adv_ce"),
        Average(inputs=("base_ce", "adv_ce"), outputs="avg_ce"),
        UpdateOp(model=self.model, loss_name="avg_ce")
    ])
    return network
def get_estimator(epochs=10, batch_size=50, epsilon=0.04, save_dir=None):
    """Build an Estimator that trains a clean model and measures FGSM robustness.

    FGSM attack ops run only outside of training (``mode="!train"``), so the
    model itself is optimized purely on the clean cross-entropy loss.

    Args:
        epochs: Number of training epochs.
        batch_size: Pipeline batch size.
        epsilon: FGSM attack step size used during eval/test.
        save_dir: Directory for the best-model checkpoint. When None a fresh
            temporary directory is created per call. (The old default
            ``tempfile.mkdtemp()`` in the signature was evaluated once at
            import time, so every call shared one directory and it was
            created even when never used.)

    Returns:
        A configured ``fe.Estimator``.
    """
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    train_data, eval_data = cifar10.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[Normalize(inputs="x", outputs="x",
                       mean=(0.4914, 0.4822, 0.4465),
                       std=(0.2471, 0.2435, 0.2616))])
    clean_model = fe.build(model_fn=lambda: LeNet(input_shape=(32, 32, 3)),
                           optimizer_fn="adam",
                           model_name="clean_model")
    clean_network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=clean_model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="base_ce"),
        # Attack + adversarial inference only when not training
        FGSM(data="x", loss="base_ce", outputs="x_adverse", epsilon=epsilon, mode="!train"),
        ModelOp(model=clean_model, inputs="x_adverse", outputs="y_pred_adv", mode="!train"),
        UpdateOp(model=clean_model, loss_name="base_ce")
    ])
    clean_traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name="clean_accuracy"),
        Accuracy(true_key="y", pred_key="y_pred_adv", output_name="adversarial_accuracy"),
        BestModelSaver(model=clean_model, save_dir=save_dir, metric="base_ce", save_best_mode="min"),
    ]
    clean_estimator = fe.Estimator(pipeline=pipeline,
                                   network=clean_network,
                                   epochs=epochs,
                                   traces=clean_traces,
                                   log_steps=1000)
    return clean_estimator
def get_estimator(epsilon=0.04,
                  epochs=10,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=None):
    """Build an Estimator for FGSM adversarial training on ciFAIR-10.

    The model is updated on the average of the clean and adversarial
    cross-entropy losses, so training itself is adversarially hardened.

    Args:
        epsilon: FGSM attack step size.
        epochs: Number of training epochs.
        batch_size: Pipeline batch size.
        max_train_steps_per_epoch: Optional cap on train steps per epoch.
        max_eval_steps_per_epoch: Optional cap on eval steps per epoch.
        save_dir: Directory for the best-model checkpoint. When None a fresh
            temporary directory is created per call. (The old default
            ``tempfile.mkdtemp()`` in the signature ran once at import time,
            so all calls shared a single directory.)

    Returns:
        A configured ``fe.Estimator``.
    """
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    # step 1
    train_data, eval_data = cifair10.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               Normalize(inputs="x", outputs="x",
                                         mean=(0.4914, 0.4822, 0.4465),
                                         std=(0.2471, 0.2435, 0.2616)),
                               # Channels-first layout for the torch-style LeNet below
                               ChannelTranspose(inputs="x", outputs="x"),
                           ])
    # step 2
    model = fe.build(model_fn=lambda: LeNet(input_shape=(3, 32, 32)), optimizer_fn="adam")
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="base_ce"),
        FGSM(data="x", loss="base_ce", outputs="x_adverse", epsilon=epsilon),
        ModelOp(model=model, inputs="x_adverse", outputs="y_pred_adv"),
        CrossEntropy(inputs=("y_pred_adv", "y"), outputs="adv_ce"),
        Average(inputs=("base_ce", "adv_ce"), outputs="avg_ce"),
        UpdateOp(model=model, loss_name="avg_ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name="base accuracy"),
        Accuracy(true_key="y", pred_key="y_pred_adv", output_name="adversarial accuracy"),
        BestModelSaver(model=model, save_dir=save_dir, metric="adv_ce", save_best_mode="min"),
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch,
        monitor_names=["base_ce", "adv_ce"])
    return estimator
def get_estimator(epsilon=0.04,
                  epochs=20,
                  batch_size=32,
                  code_length=16,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=None):
    """Build an Estimator for an error-correcting-code (Hadamard) LeNet on ciFAIR-10.

    Labels are expanded to Hadamard codes and the model is trained with a
    hinge loss on the code bits; FGSM attacks are evaluated (eval/test only)
    against that same hinge loss to measure adversarial robustness.

    Args:
        epsilon: FGSM attack step size (eval/test only).
        epochs: Number of training epochs.
        batch_size: Pipeline batch size.
        code_length: Length of the Hadamard output code of the model.
        train_steps_per_epoch: Optional cap on train steps per epoch.
        eval_steps_per_epoch: Optional cap on eval steps per epoch.
        save_dir: Directory for the best-model checkpoint. When None a fresh
            temporary directory is created per call. (The old default
            ``tempfile.mkdtemp()`` in the signature ran once at import time,
            so all calls shared a single directory.)

    Returns:
        A configured ``fe.Estimator``.
    """
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    # step 1
    train_data, eval_data = cifair10.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x",
                      mean=(0.4914, 0.4822, 0.4465),
                      std=(0.2471, 0.2435, 0.2616)),
            ChannelTranspose(inputs="x", outputs="x"),
            # Encode integer labels as Hadamard code vectors
            Hadamard(inputs="y", outputs="y_code", n_classes=10)
        ],
        num_process=0)
    # step 2
    model = fe.build(model_fn=lambda: EccLeNet(code_length=code_length), optimizer_fn="adam")
    network = fe.Network(ops=[
        Watch(inputs="x", mode=('eval', 'test')),
        ModelOp(model=model, inputs="x", outputs="y_pred_code"),
        Hinge(inputs=("y_pred_code", "y_code"), outputs="base_hinge"),
        UpdateOp(model=model, loss_name="base_hinge"),
        UnHadamard(inputs="y_pred_code", outputs="y_pred", n_classes=10, mode=('eval', 'test')),
        # The adversarial attack:
        FGSM(data="x", loss="base_hinge", outputs="x_adverse_hinge", epsilon=epsilon, mode=('eval', 'test')),
        ModelOp(model=model, inputs="x_adverse_hinge", outputs="y_pred_adv_hinge_code", mode=('eval', 'test')),
        Hinge(inputs=("y_pred_adv_hinge_code", "y_code"), outputs="adv_hinge", mode=('eval', 'test')),
        UnHadamard(inputs="y_pred_adv_hinge_code", outputs="y_pred_adv_hinge", n_classes=10,
                   mode=('eval', 'test')),
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name="base_accuracy"),
        Accuracy(true_key="y", pred_key="y_pred_adv_hinge", output_name="adversarial_accuracy"),
        BestModelSaver(model=model, save_dir=save_dir, metric="base_hinge", save_best_mode="min",
                       load_best_final=True)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             monitor_names=["adv_hinge"])
    return estimator
def test_tf_input(self):
    """FGSM.forward on TF tensors should reproduce the expected output.

    The attack is executed via the active distribution strategy when it is
    a MirroredStrategy; otherwise it runs directly.
    """
    def attack(data, op):
        with tf.GradientTape(persistent=True) as tape:
            loss_value = data * data
            result = op.forward(data=[data, loss_value], state={'tape': tape})
        return result

    op = FGSM(data='x', loss='loss', outputs='y')
    dist = tf.distribute.get_strategy()
    if isinstance(dist, tf.distribute.MirroredStrategy):
        result = dist.run(attack, args=(self.tf_data, op)).values[0]
    else:
        result = attack(self.tf_data, op)
    self.assertTrue(np.array_equal(result, self.tf_output))
def test_torch_input(self):
    """FGSM.forward on torch tensors should reproduce the expected output."""
    op = FGSM(data='x', loss='loss', outputs='y')
    # Any differentiable function of the input works as the loss stand-in
    loss_value = self.torch_data * self.torch_data
    result = op.forward(data=[self.torch_data, loss_value], state={'tape': None})
    self.assertTrue(is_equal(result, self.torch_output))
def test_tf_input(self):
    """FGSM.forward on TF tensors (no distribution strategy) matches expected output."""
    op = FGSM(data='x', loss='loss', outputs='y')
    with tf.GradientTape(persistent=True) as tape:
        # Any differentiable function of the input works as the loss stand-in
        loss_value = self.tf_data * self.tf_data
        result = op.forward(data=[self.tf_data, loss_value], state={'tape': tape})
    self.assertTrue(np.array_equal(result, self.tf_output))