def main(_):
    """Smoke-test DataProducer batching and cursor reset on the adversarial test set.

    Reads the adversarial examples and their labels from disk, then makes 10
    full passes over the producer, printing each batch index and resetting
    the cursor between passes.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    testX_adv = utils.readdata_np(ADV_DATA_PATH)
    testy_adv = utils.readdata_np(ADV_LABEL_PATH)
    test_adv_input = utils.DataProducer(testX_adv, testy_adv, 128, 30, "test_adv")
    for j in range(10):
        for i, x, y in test_adv_input.next_batch():
            # Fix: the original used the Python 2 `print i` statement, which
            # is a syntax error under Python 3; print() of a single value
            # behaves identically under both versions.
            print(i)
        test_adv_input.reset_cursor()
def test(name='train'):
    """Drive one of three workflows for the FFN_lh model.

    Args:
        name: 'train'       -- fit the model on the train set, monitoring the
                               test set;
              'evaluate'    -- score held-out malicious/benign data with the
                               `evaluate` helper;
              'experiments' -- restore a checkpoint and report accuracy on
                               clean data plus FGS / C.W. adversarial samples
                               at several perturbation budgets.
    """

    def _build_model_and_session(input_dim):
        """Build config, graph, model and an initialized session.

        The original repeated this setup block verbatim in every branch.
        """
        _config = {
            'n_epoches': FLAGS.epoches,
            'batch_size': FLAGS.batch_size,
            'output_dim': FLAGS.output_dims,
            'input_dim': input_dim,
            'hidden_dims': list(map(int, FLAGS.hidden_dims.split(','))),
            'adv_k': FLAGS.adv_k,
            'K': FLAGS.K,  # recommend K = sqrt(input_dim)
            'L': FLAGS.L
        }
        config = utils.ParamWrapper(_config)
        tf.reset_default_graph()
        # Fix: the graph-level seed must be set BEFORE ops are created for it
        # to affect their initializers; the original set it after building
        # the model, which made the seed a no-op.
        tf.set_random_seed(1234)
        model_mlp = FFN_lh_model(config)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        return config, model_mlp, sess

    if name == 'train':
        # read dataset
        trainX = utils.readdata_np(TRAIN_DATA_PATH)
        trainy = utils.readdata_np(TRAIN_LABEL_PATH)
        testX = utils.readdata_np(TEST_DATA_PATH)
        testy = utils.readdata_np(TEST_LABEL_PATH)
        train_input = utils.DataProducer(trainX, trainy, FLAGS.batch_size, FLAGS.epoches, "train")
        test_input = utils.DataProducer(testX, testy, FLAGS.batch_size, FLAGS.epoches, "test")
        config, model_mlp, sess = _build_model_and_session(trainX.shape[1])
        train(sess, model_mlp, train_input, test_input, config)

    if name == 'evaluate':
        mal_data = utils.readdata_np(MAL_DATA_PATH)
        ben_data = utils.readdata_np(BEN_DATA_PATH)
        config, model_mlp, sess = _build_model_and_session(mal_data.shape[1])
        evaluate(sess, model_mlp, mal_data, ben_data)

    if name == 'experiments':
        ADV_SMPS_dir = './dataset/adv_smps/'
        # FGS adversarial samples at increasing perturbation budgets, plus
        # C.W. ("opt") samples; file-name patterns kept exactly as before
        # ('adv_smp<k>.data' vs 'adv_smps_opt<k>.data').
        fgs_budgets = [10, 20, 30, 50, 80]
        opt_budgets = [10, 20, 30]
        adv_smps = {
            k: utils.readdata_np(os.path.join(ADV_SMPS_dir, 'adv_smp%d.data' % k))
            for k in fgs_budgets
        }
        adv_smps_opt = {
            k: utils.readdata_np(os.path.join(ADV_SMPS_dir, 'adv_smps_opt%d.data' % k))
            for k in opt_budgets
        }
        CLEAN_SMPS_label_path = os.path.join(ADV_SMPS_dir, 'clean_smp.label')
        CLEAN_SMPS_path = os.path.join(ADV_SMPS_dir, 'clean_smp.data')
        adv_smps0 = utils.readdata_np(CLEAN_SMPS_path)
        label = utils.readdata_np(CLEAN_SMPS_label_path)

        test_bias = np.array([[1., 1]], dtype=np.float32)
        config, model_mlp, sess = _build_model_and_session(adv_smps0.shape[1])

        saver = tf.train.Saver()
        model_path = os.path.join(FLAGS.save_dir, "model.ckpt")
        if tf.train.checkpoint_exists(model_path):
            saver.restore(sess, model_path)
        else:
            print("No saved parameters")

        with sess.as_default():
            def _evl(X, y):
                """Mean per-batch accuracy of the restored model on (X, y)."""
                # Fix: ceil-divide so no empty trailing batch is evaluated;
                # the original `X.shape[0] // batch_size + 1` fed an empty
                # slice when the sample count was an exact multiple of the
                # batch size, which can poison np.mean with NaN.
                n_batches = (X.shape[0] + FLAGS.batch_size - 1) // FLAGS.batch_size
                accs = []
                for mini_i in range(n_batches):
                    start_i = mini_i * FLAGS.batch_size
                    end_i = min(start_i + FLAGS.batch_size, X.shape[0])
                    _feed_dict = {
                        model_mlp.x: X[start_i:end_i],
                        model_mlp.y: y[start_i:end_i],
                        model_mlp.bias: test_bias,
                        model_mlp.is_training: False,
                        model_mlp.is_adv_training: False
                    }
                    accs.append(sess.run(model_mlp.acc, feed_dict=_feed_dict))
                # NOTE: the final (possibly smaller) batch is weighted
                # equally with full batches, as in the original.
                return np.mean(accs)

            acc_clean = _evl(adv_smps0, label)
            fgs_accs = [_evl(adv_smps[k], label) for k in fgs_budgets]
            opt_accs = [_evl(adv_smps_opt[k], label) for k in opt_budgets]

            MSG = "The predict accuracy of clean data and FGS adversarial examples with perturbations [0, 10, 20, 30, 50, 80] is {0:.5}, {1:.5},{2:.5},{3:.5}, {4:.5},{5:.5} respectively."
            print(MSG.format(acc_clean, *fgs_accs))
            MSG = "The C.W. adversarial examples with perturbations [10, 20, 30] is {0:.5},{1:.5} and {2:.5}"
            print(MSG.format(*opt_accs))
def traindt(config, train_rpst, trainy): robust_dt = nested_dt(config) robust_dt.learn(train_rpst, trainy) robust_dt.dump(save_dir) def testdt(config, test_rpst, testy): robust_dt = nested_lr(config) robust_dt.load(save_dir) acc = robust_dt.predict(test_rpst, testy) return acc if __name__ == '__main__': trainX = utils.readdata_np(TRAIN_DATA_PATH) trainy = utils.readdata_np(TRAIN_LABEL_PATH)[:, 0] testX = utils.readdata_np(TEST_DATA_PATH) testy = utils.readdata_np(TEST_LABEL_PATH)[:, 0] testX_adv = utils.readdata_np(ADV_DATA_PATH) testy_adv = utils.readdata_np(ADV_LABEL_PATH)[:, 0] mal_data = utils.readdata_np(MAL_DATA_PATH) ben_data = utils.readdata_np(BEN_DATA_PATH) input_dim = trainX.shape[1] _config = { 'save_dir': save_dir, 'input_dim': input_dim, 'K': K, 'L': L, 'max_depth': 2, 'is_rf': False