Example #1
import time

import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.datasets import load_svmlight_file

from art.estimators.classification import XGBoostClassifier
from art.attacks.evasion import HopSkipJump

# model_path, data_path, nfeatures and nclasses are assumed to be defined
# elsewhere: the path to a saved XGBoost model, the path to a libsvm-format
# test set, and the number of input features and output classes.
model = xgb.Booster()
model.load_model(model_path)

# Wrap the trained booster in an ART classifier
classifier = XGBoostClassifier(model=model, clip_values=(0, 1), nb_features=nfeatures, nb_classes=nclasses)

# Load the test set and shuffle it
test_data, test_label = load_svmlight_file(data_path, n_features=nfeatures)
test_data = test_data.toarray()
test_label = test_label.astype('int')
df = pd.DataFrame(test_data)
df['label'] = test_label
df = df.sample(frac=1)
test_label = df['label'].tolist()
test_data = np.array(df.drop(columns=['label']))

predictions = np.argmax(classifier.predict(test_data), axis=1)

# Keep the first n_selected correctly classified samples and attack them
attack = HopSkipJump(classifier=classifier, norm=np.inf)
n_selected = 100
corrected = []
c_labels = []
for i in range(len(test_label)):
    if test_label[i] == predictions[i]:
        corrected.append(test_data[i])
        c_labels.append(test_label[i])
    if len(corrected) >= n_selected:
        break
corrected = np.array(corrected)

start = time.time()
test_adv = attack.generate(corrected)
end = time.time()
print("HopSkipJump generation took {:.1f}s".format(end - start))
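
The example above times the attack but never checks its result. A minimal follow-up sketch, assuming the variables test_adv, corrected, c_labels and classifier from the code above, that measures how often the HopSkipJump examples flip the originally correct predictions and how large the perturbation is:

# Sketch: evaluate the adversarial examples produced above
# (assumes test_adv, corrected, c_labels and classifier already exist).
adv_predictions = np.argmax(classifier.predict(test_adv), axis=1)
adv_accuracy = np.mean(adv_predictions == np.array(c_labels))
print("Accuracy on adversarial examples: {:.1f}%".format(adv_accuracy * 100))

# Average L-infinity distance between original and adversarial samples
linf = np.max(np.abs(test_adv - corrected), axis=1).mean()
print("Mean L-inf perturbation: {:.4f}".format(linf))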
Example #2

# Assumed imports for this example (not shown in the original snippet):
import numpy as np
import xgboost as xgb
from art.estimators.classification import XGBoostClassifier
from art.attacks.evasion import ZooAttack

# Steps 1-2 (loading the dataset into x_test/y_test, determining
# min_pixel_value/max_pixel_value, building the dtrain/dtest DMatrix
# objects and defining params) are assumed to have been run already.
evals = [(dtest, "test"), (dtrain, "train")]
model = xgb.train(params=params, dtrain=dtrain, num_boost_round=2, evals=evals)

# Step 3: Create the ART classifier

classifier = XGBoostClassifier(
    model=model, clip_values=(min_pixel_value, max_pixel_value), nb_features=28 * 28, nb_classes=10
)

# Step 4: Train the ART classifier

# The model has already been trained in step 2

# Step 5: Evaluate the ART classifier on benign test examples

predictions = classifier.predict(x_test)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on benign test examples: {}%".format(accuracy * 100))

# Step 6: Generate adversarial test examples
attack = ZooAttack(
    classifier=classifier,
    confidence=0.0,
    targeted=False,
    learning_rate=1e-1,
    max_iter=200,
    binary_search_steps=10,
    initial_const=1e-3,
    abort_early=True,
    use_resize=False,
    use_importance=False,