# Normalize backdoor-dataset dtypes: labels to int32, test images to float32.
# (bx_train's dtype is presumably already float32 from an earlier cast — TODO confirm.)
by_train = by_train.astype('int32')
bx_test = bx_test.astype('float32')
by_test = by_test.astype('int32')

# [DEBUG]
print(' : Load the backdoor dataset [{}]'.format(args.poisonp))
print(' Train : {} in [{}, {}]'.format(bx_train.shape, bx_train.min(), bx_train.max()))
print(' Test : {} in [{}, {}]'.format(bx_test.shape, bx_test.min(), bx_test.max()))

# blend the backdoor data, and compose into the tensorflow datasets:
# the poisoned training set is clean train data + backdoor train data,
# stacked along the sample axis (axis=0).
bd_x_train = np.concatenate((x_train, bx_train), axis=0)
bd_y_train = np.concatenate((y_train, by_train), axis=0)
# training set is shuffled; the two evaluation sets are not
bd_train_dataset = datasets.convert_to_tf_dataset(bd_x_train, bd_y_train, batch=batch_size, shuffle=True)
# clean test set — measures accuracy on benign inputs
bd_ctest_dataset = datasets.convert_to_tf_dataset(x_test, y_test, batch=batch_size)
# backdoor test set — measures the attack's success on triggered inputs
bd_btest_dataset = datasets.convert_to_tf_dataset(bx_test, by_test, batch=batch_size)
print(' : Construct them into the TF datasets')

# compute the baseline accuracy of the (pre-trained) base model on both
# the clean and the backdoor test sets, before any further training
baseline_acc = _validate(base_model, bd_ctest_dataset)
baseline_bacc = _validate(base_model, bd_btest_dataset)
print(
    ' : Baseline accuracies on clean [{:.4f}] / backdoor [{:.4f}]'.format(
        baseline_acc, baseline_bacc))
y_poison = y_poison.astype('int32') # enforce the poisons to be within [0, 1] range x_poison = np.clip(x_poison, 0., 1.) # [DEBUG] print(' : Load the poison data from [{}]'.format(args.poisonp)) print(' Train : {} in [{}, {}]'.format(x_train.shape, x_train.min(), x_train.max())) print(' Test : {} in [{}, {}]'.format(x_test.shape, x_test.min(), x_test.max())) print(' Poison: {} in [{}, {}]'.format(x_poison.shape, x_poison.min(), x_poison.max())) # compose into the tensorflow datasets clean_validset = datasets.convert_to_tf_dataset(x_test, y_test) # load the baseline acc baseline_acc = _validate(baseline_model, clean_validset) print(' : Baseline model\'s accuracy is [{}]'.format(baseline_acc)) # -------------------------------------------------------------------------- # Substitute the numpy module used by JAX (when privacy) # -------------------------------------------------------------------------- import jax.numpy as np # -------------------------------------------------------------------------- # Set the location to store... # -------------------------------------------------------------------------- # extract the setup poison_task = args.poisonp.split('/')[3]
(x_train, y_train), (x_test, y_test), (x_poison, y_poison) = \ datasets.load_slab_poisons(args.poisonp) else: assert False, ('Error: unknown format file - {}'.format(args.poisonp)) # enforce the poisons to be within [0, 1] range x_poison = np.clip(x_poison, 0., 1.) # [DEBUG] print (' : Load the poison data from [{}]'.format(args.poisonp)) print (' Train : {} in [{}, {}]'.format(x_train.shape, x_train.min(), x_train.max())) print (' Test : {} in [{}, {}]'.format(x_test.shape, x_test.min(), x_test.max())) print (' Poison: {} in [{}, {}]'.format(x_poison.shape, x_poison.min(), x_poison.max())) # compose into the tensorflow datasets clean_validset = datasets.convert_to_tf_dataset(x_test, y_test) # to examine the training time accuracy on clean and poison samples ctrain_examine = datasets.convert_to_tf_dataset(x_train, y_train) ptrain_examine = datasets.convert_to_tf_dataset(x_poison, y_poison) # load the baseline acc baseline_acc = _validate(baseline_model, clean_validset) print (' : Baseline model\'s accuracy is [{}]'.format(baseline_acc)) # -------------------------------------------------------------------------- # Set the location to store... # -------------------------------------------------------------------------- # extract the setup poison_task = args.poisonp.split('/')[3]