Example #1
def standard_nn(X_train, y_train, X_test, y_test, X_extra, y_extra, ep):
    n_test = len(y_test)
    n_extra = len(y_extra)
    standard_acc = []
    neigh = KNeighborsClassifier(n_neighbors=1)
    neigh.fit(X_train, y_train)
    y_extra_pred = neigh.predict(X_extra)

    clf = None
    dt = None
    mask = None
    mapping = 'noMapping'

    [clf, mask,
     [acc_train, acc_extra,
      acc_original]] = generate_sub(X_train, y_train, X_test, y_test, X_extra,
                                    y_extra_pred, neigh, ep[-1])
    for i in range(len(ep)):
        eps = ep[i]
        adv_test = generate_adversarial_examples(FLAG, eps, X_train, X_test,
                                                 y_train, y_test, X_extra,
                                                 y_extra_pred, mapping, clf,
                                                 mask)
        # debug print: the perturbation applied to the test points
        print(adv_test - X_test)
        y_adv = neigh.predict(adv_test)
        adv = sum(abs(y_test - y_adv)) / 2
        test_acc = 1 - (adv * 1.0 / n_test)
        standard_acc.append(test_acc)

    return [standard_acc, [acc_train, acc_extra, acc_original]]
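The error count inside the loop assumes +/-1 labels: |y_test - y_adv| is 0 for a correct prediction and 2 for a wrong one, so summing and halving gives the number of misclassified points. A tiny worked check with made-up predictions:

import numpy as np

y_test = np.array([1, -1, 1, -1])
y_adv = np.array([1, 1, 1, -1])               # one flipped prediction
errors = sum(abs(y_test - y_adv)) / 2         # 2 / 2 -> 1.0
test_acc = 1 - (errors * 1.0 / len(y_test))   # -> 0.75
print(errors, test_acc)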
Example #2
def at_nn_all(X_train, y_train, X_test, y_test, X_extra, y_extra, ep):
    n_test = len(y_test)
    n_extra = len(y_extra)
    atnn_acc = []
    [new_train, y_new_train, neigh] = ATNN_all(X_train, y_train, eps_aug_all)
    y_extra_pred = neigh.predict(X_extra)

    clf = None
    dt = None
    mask = None
    mapping = 'noMapping'

    [clf, mask,
     [acc_train, acc_extra,
      acc_original]] = generate_sub(new_train, y_new_train, X_test, y_test,
                                    X_extra, y_extra_pred, neigh, ep[-1])
    for i in range(len(ep)):
        eps = ep[i]
        adv_test = generate_adversarial_examples(FLAG, eps, new_train, X_test,
                                                 y_new_train, y_test, X_extra,
                                                 y_extra_pred, mapping, clf,
                                                 mask)
        y_adv = neigh.predict(adv_test)
        adv = sum(abs(y_test - y_adv)) / 2
        test_acc = 1 - (adv * 1.0 / n_test)
        atnn_acc.append(test_acc)
    if clf is not None:
        return [atnn_acc, [acc_train, acc_extra, acc_original]]
    else:
        return atnn_acc
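Note that at_nn_all returns [atnn_acc, [acc_train, acc_extra, acc_original]] when generate_sub produced a substitute classifier, and the plain accuracy list otherwise. A small helper along these lines (the name is illustrative, not part of the original module) lets callers handle both shapes:

def unpack_at_nn_all(result):
    # either [accuracies, [acc_train, acc_extra, acc_original]] or a plain accuracy list
    if result and isinstance(result[0], list):
        return result[0], result[1]
    return result, None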
Example #3
def ATNN(X_train, y_train, eps, X_aug=None, y_aug=None):
    neigh = KNeighborsClassifier(n_neighbors=1)
    neigh.fit(X_train, y_train)
    if X_aug is not None and y_aug is not None:
        new_train = np.concatenate((X_train, X_aug), axis=0)
        y_new_train = np.concatenate((y_train, y_aug), axis=0)
    else:
        clf = None
        dt = None
        mask = None
        mapping = 'noMapping'
        if FLAG == 'dt':
            clf, dt = prepare_tree(X_train, y_train, MAX_DEPTH)
        elif FLAG == 'lr':
            clf = LogisticRegression()
            clf.fit(X_train, y_train)
        elif FLAG == 'kernel' or FLAG == 'wb_kernel':
            clf = Kernel_Classifier(c=C)
            clf.fit(X_train, y_train)
        elif FLAG == 'nn':
            clf = neural_net_classifier(task=TASK)
            clf.fit(X_train,
                    y_train,
                    target_model=neigh,
                    model=model,
                    train_params=train_params,
                    shape=shape)
        else:
            clf = neigh
        y_pred = clf.predict(X_train).reshape(y_train.shape)
        mask = y_pred == y_train
        # NOTE: the all-True mask below overrides the agreement mask above,
        # so every training point is attacked when building the augmentation set
        mask = [True for i in range(len(y_train))]
        if FLAG == 'dt':
            clf = dt
        aug = generate_adversarial_examples(FLAG, eps, X_train, X_train,
                                            y_train, y_train, X_train, y_train,
                                            mapping, clf, mask, 'attack')
        new_train = np.concatenate((X_train, aug), axis=0)
        y_new_train = np.concatenate((y_train, y_train), axis=0)
    [new_train, y_new_train] = shuffle_data(new_train, y_new_train)
    neigh.fit(new_train, y_new_train)
    return [new_train, y_new_train, neigh]
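ATNN is adversarial training for the 1-NN model: perturb the training points against a substitute classifier, append the perturbed copies with their original labels, and refit the nearest-neighbour model on the enlarged set. The sketch below shows that augment-and-refit pattern in a self-contained form; the perturbation (a small step toward the nearest opposite-class point) is only a stand-in for generate_adversarial_examples, which is not shown in these examples, and it assumes both classes are present in y_train.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier


def toy_perturbation(X, y, eps):
    # move each point a distance eps toward its nearest opposite-class neighbour
    X_adv = np.array(X, dtype=float)
    for i in range(len(X)):
        other = X[y != y[i]]
        step = other[np.argmin(np.linalg.norm(other - X[i], axis=1))] - X[i]
        X_adv[i] = X[i] + eps * step / (np.linalg.norm(step) + 1e-12)
    return X_adv


def atnn_sketch(X_train, y_train, eps):
    aug = toy_perturbation(X_train, y_train, eps)
    new_train = np.concatenate((X_train, aug), axis=0)
    y_new_train = np.concatenate((y_train, y_train), axis=0)  # copies keep their labels
    neigh = KNeighborsClassifier(n_neighbors=1)
    neigh.fit(new_train, y_new_train)
    return new_train, y_new_train, neigh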
Example #4
def robust_nn(X_train, y_train, X_test, y_test, X_extra, y_extra, ep, eps_adv):
    n_test = len(y_test)
    n_extra = len(y_extra)
    robustnn_acc = []
    print(ep)
    robust_1nn = Robust_1NN(X_train, y_train, 0.45, 0.1, eps_adv)
    robust_1nn.find_confident_label()
    robust_1nn.find_red_points()
    robust_1nn.fit()
    [new_train, y_new_train] = robust_1nn.get_data()
    neigh = robust_1nn.get_clf()

    y_extra_pred = neigh.predict(X_extra)

    clf = None
    dt = None
    mask = None
    mapping = 'noMapping'

    [clf, mask,
     [acc_train, acc_extra,
      acc_original]] = generate_sub(new_train, y_new_train, X_test, y_test,
                                    X_extra, y_extra_pred, neigh, ep[-1])

    for i in range(len(ep)):
        eps = ep[i]
        adv_test = generate_adversarial_examples(FLAG, eps, new_train, X_test,
                                                 y_new_train, y_test, X_extra,
                                                 y_extra_pred, mapping, clf,
                                                 mask)
        # print(adv_test)
        y_adv = neigh.predict(adv_test)
        adv = sum(abs(y_test - y_adv)) / 2
        test_acc = 1 - (adv * 1.0 / n_test)
        robustnn_acc.append(test_acc)
    return [robustnn_acc, [acc_train, acc_extra, acc_original]]
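Unlike the other wrappers, robust_nn takes an extra eps_adv argument that is forwarded to Robust_1NN (the 0.45 and 0.1 constructor arguments are hard-coded above). A hypothetical call, unpacking the nested return value; the data split and the epsilon values are placeholders:

robust_acc, (acc_train, acc_extra, acc_original) = robust_nn(
    X_train, y_train, X_test, y_test, X_extra, y_extra,
    ep=[0.0, 0.05, 0.1], eps_adv=0.1)
print(robust_acc)  # robust test accuracy at each value in ep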
Example #5
def ATNN_all(X_train, y_train, eps, X_aug=None, y_aug=None):
    neigh = KNeighborsClassifier(n_neighbors=1)
    neigh.fit(X_train, y_train)
    if X_aug is not None and y_aug is not None:
        new_train = np.concatenate((X_train, X_aug), axis=0)
        y_new_train = np.concatenate((y_train, y_aug), axis=0)
    else:
        clf = None
        dt = None
        mask = None
        clfs = []
        masks = []
        #FLAGS = ['wb', 'dt', 'lr', 'kernel', 'nn']
        FLAGS = ['wb', 'kernel', 'nn']
        mapping = 'noMapping'

        new_train = np.copy(X_train)
        y_new_train = np.copy(y_train)

        clf = neigh
        y_pred = clf.predict(X_train).reshape(y_train.shape)
        mask = y_pred == y_train
        clfs.append(clf)
        masks.append(mask)
        '''
        clf, dt = prepare_tree(X_train, y_train, MAX_DEPTH)
        y_pred = clf.predict(X_train).reshape(y_train.shape)
        mask = y_pred==y_train
        clfs.append(dt)
        masks.append(mask)
        
        clf = LogisticRegression()
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_train).reshape(y_train.shape)
        mask = y_pred==y_train
        clfs.append(clf)
        masks.append(mask)
        '''

        clf = Kernel_Classifier(c=C)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_train).reshape(y_train.shape)
        mask = y_pred == y_train
        clfs.append(clf)
        masks.append(mask)

        clf = neural_net_classifier(task=TASK)
        clf.fit(X_train,
                y_train,
                target_model=neigh,
                model=model,
                train_params=train_params,
                shape=shape)
        y_pred = clf.predict(X_train).reshape(y_train.shape)
        mask = y_pred == y_train
        clfs.append(clf)
        masks.append(mask)
        augs = []
        for (clf, mask, flag) in zip(clfs, masks, FLAGS):
            # attack every training point, overriding the agreement mask
            mask = [True for i in range(len(y_train))]
            aug = generate_adversarial_examples(flag, eps, X_train, X_train,
                                                y_train, y_train, X_train,
                                                y_train, mapping, clf, mask,
                                                'attack')
            augs.append(aug)
            new_train = np.concatenate((new_train, aug), axis=0)
            y_new_train = np.concatenate((y_new_train, y_train), axis=0)
    [new_train, y_new_train] = shuffle_data(new_train, y_new_train)
    neigh.fit(new_train, y_new_train)
    return [new_train, y_new_train, neigh]
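When called without X_aug/y_aug, ATNN_all adds one adversarial copy of the training set per substitute in FLAGS ('wb', 'kernel', 'nn' above), so the returned training set is four times the size of the original. A hypothetical call; eps and the data are placeholders, and the module-level globals used above (C, TASK, model, train_params, shape) must already be configured:

new_train, y_new_train, neigh = ATNN_all(X_train, y_train, eps=0.1)
print(new_train.shape[0] == 4 * X_train.shape[0])  # True: originals plus three adversarial copies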