Example #1
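All of the examples on this page assume that MindSpore and MindArmour are installed. A minimal sketch of the imports and globals they rely on is shown below; the exact MindArmour module paths vary between releases (older releases expose the same classes under mindarmour.fuzzing, mindarmour.attacks and mindarmour.defenses), and the TAG string as well as the Net, LeNet5 and generate_mnist_dataset helpers are placeholders for definitions that live in the example repository and are not shown here.

# Imports and globals assumed by the examples below (a sketch; module paths
# vary between MindSpore/MindArmour releases, and TAG is a placeholder value).
import random

import numpy as np

import mindspore
from mindspore import context, Tensor
from mindspore import load_checkpoint, load_param_into_net
from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum
from mindspore.train import Model

from mindarmour.fuzz_testing import Fuzzer, ModelCoverageMetrics
from mindarmour.adv_robustness.attacks import FastGradientSignMethod
from mindarmour.adv_robustness.defenses import AdversarialDefense
from mindarmour.utils.logger import LogUtil

LOGGER = LogUtil.get_instance()
TAG = 'fuzz_testing_example'  # placeholder tag used in the LOGGER calls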
def test_fuzzing_ascend():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # load network
    net = Net()
    model = Model(net)
    batch_size = 8
    num_classes = 10
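    # Each entry in mutate_config names a mutation method and lists the
    # candidate parameter values the fuzzer may sample when mutating a seed.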
    mutate_config = [{
        'method': 'Blur',
        'params': {
            'auto_param': [True]
        }
    }, {
        'method': 'Contrast',
        'params': {
            'factor': [2, 1]
        }
    }, {
        'method': 'Translate',
        'params': {
            'x_bias': [0.1, 0.3],
            'y_bias': [0.2]
        }
    }, {
        'method': 'FGSM',
        'params': {
            'eps': [0.1, 0.2, 0.3],
            'alpha': [0.1]
        }
    }]
    # initialize fuzz test with training dataset
    neuron_num = 10
    segmented_num = 1000
    train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
    model_coverage_test = ModelCoverageMetrics(model, neuron_num,
                                               segmented_num, train_images)

    # fuzz test with original test data
    # get test data
    test_images = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    test_labels = np.random.randint(num_classes,
                                    size=batch_size).astype(np.int32)
    test_labels = (np.eye(num_classes)[test_labels]).astype(np.float32)

    initial_seeds = []
    # make initial seeds
    for img, label in zip(test_images, test_labels):
        initial_seeds.append([img, label])

    initial_seeds = initial_seeds[:100]
    model_coverage_test.calculate_coverage(
        np.array(test_images[:100]).astype(np.float32))
    LOGGER.info(TAG, 'KMNC of this test is : %s',
                model_coverage_test.get_kmnc())

    model_fuzz_test = Fuzzer(model, train_images, neuron_num, segmented_num)
    _, _, _, _, metrics = model_fuzz_test.fuzzing(mutate_config, initial_seeds)
    print(metrics)
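Example #2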
def test_lenet_mnist_coverage_cpu():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # load network
    net = Net()
    model = Model(net)

    # initialize fuzz test with training dataset
    neuron_num = 10
    segmented_num = 1000
    training_data = (np.random.random((10000, 10)) * 20).astype(np.float32)
    model_fuzz_test = ModelCoverageMetrics(model, neuron_num, segmented_num,
                                           training_data)

    # fuzz test with original test data
    # get test data
    test_data = (np.random.random((2000, 10)) * 20).astype(np.float32)
    test_labels = np.random.randint(0, 10, 2000).astype(np.int32)
    model_fuzz_test.calculate_coverage(test_data)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

    # generate adv_data
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
    adv_data = attack.batch_generate(test_data, test_labels, batch_size=32)
    model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
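Example #3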
def test_lenet_mnist_coverage():
    # upload trained network
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model = Model(net)

    # get training data
    data_list = "../common/dataset/MNIST/train"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=True)
    train_images = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        train_images.append(images)
    train_images = np.concatenate(train_images, axis=0)

    # initialize fuzz test with training dataset
    model_fuzz_test = ModelCoverageMetrics(model, 10, 1000, train_images)

    # fuzz test with original test data
    # get test data
    data_list = "../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=True)
    test_images = []
    test_labels = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
    test_images = np.concatenate(test_images, axis=0)
    test_labels = np.concatenate(test_labels, axis=0)
    model_fuzz_test.calculate_coverage(test_images)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

    # generate adv_data
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
    adv_data = attack.batch_generate(test_images, test_labels, batch_size=32)
    model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
    LOGGER.info(TAG, 'KMNC of this adv data is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this adv data is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this adv data is : %s', model_fuzz_test.get_snac())
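Example #4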
def test_lenet_mnist_coverage_ascend():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # load network
    net = Net()
    model = Model(net)

    # initialize fuzz test with training dataset
    training_data = (np.random.random((10000, 10))*20).astype(np.float32)
    model_fuzz_test = ModelCoverageMetrics(model, 10, 1000, training_data)

    # fuzz test with original test data
    # get test data
    test_data = (np.random.random((2000, 10))*20).astype(np.float32)
    test_labels = np.random.randint(0, 10, 2000)
    test_labels = (np.eye(10)[test_labels]).astype(np.float32)
    model_fuzz_test.calculate_coverage(test_data)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

    # generate adv_data
    attack = FastGradientSignMethod(net, eps=0.3)
    adv_data = attack.batch_generate(test_data, test_labels, batch_size=32)
    model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
Example #5
def test_lenet_mnist_fuzzing():
    # upload trained network
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model = Model(net)
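    # 'auto_param': [True] lets a mutation method draw its parameters from a
    # recommended range automatically instead of using a fixed candidate list.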
    mutate_config = [{'method': 'Blur',
                      'params': {'radius': [0.1, 0.2, 0.3],
                                 'auto_param': [True, False]}},
                     {'method': 'Contrast',
                      'params': {'auto_param': [True]}},
                     {'method': 'Translate',
                      'params': {'auto_param': [True]}},
                     {'method': 'Brightness',
                      'params': {'auto_param': [True]}},
                     {'method': 'Noise',
                      'params': {'auto_param': [True]}},
                     {'method': 'Scale',
                      'params': {'auto_param': [True]}},
                     {'method': 'Shear',
                      'params': {'auto_param': [True]}},
                     {'method': 'FGSM',
                      'params': {'eps': [0.3, 0.2, 0.4], 'alpha': [0.1]}}
                    ]

    # get training data
    data_list = "../common/dataset/MNIST/train"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)
    train_images = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        train_images.append(images)
    train_images = np.concatenate(train_images, axis=0)
    neuron_num = 10
    segmented_num = 1000

    # initialize fuzz test with training dataset
    model_coverage_test = ModelCoverageMetrics(model, neuron_num, segmented_num, train_images)

    # fuzz test with original test data
    # get test data
    data_list = "../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)
    test_images = []
    test_labels = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
    test_images = np.concatenate(test_images, axis=0)
    test_labels = np.concatenate(test_labels, axis=0)
    initial_seeds = []

    # make initial seeds
    for img, label in zip(test_images, test_labels):
        initial_seeds.append([img, label])

    initial_seeds = initial_seeds[:100]
    model_coverage_test.calculate_coverage(
        np.array(test_images[:100]).astype(np.float32))
    LOGGER.info(TAG, 'KMNC of this test is : %s',
                model_coverage_test.get_kmnc())

    model_fuzz_test = Fuzzer(model, train_images, neuron_num, segmented_num)
    _, _, _, _, metrics = model_fuzz_test.fuzzing(mutate_config, initial_seeds, eval_metrics='auto')
    if metrics:
        for key in metrics:
            LOGGER.info(TAG, key + ': %s', metrics[key])
Example #6
def example_lenet_mnist_fuzzing():
    """
    An example of fuzz testing, followed by enhancing the non-robust model.
    """
    # upload trained network
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/lenet_m1-10_1250.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model = Model(net)
    mutate_config = [{'method': 'Blur',
                      'params': {'auto_param': [True]}},
                     {'method': 'Contrast',
                      'params': {'auto_param': [True]}},
                     {'method': 'Translate',
                      'params': {'auto_param': [True]}},
                     {'method': 'Brightness',
                      'params': {'auto_param': [True]}},
                     {'method': 'Noise',
                      'params': {'auto_param': [True]}},
                     {'method': 'Scale',
                      'params': {'auto_param': [True]}},
                     {'method': 'Shear',
                      'params': {'auto_param': [True]}},
                     {'method': 'FGSM',
                      'params': {'eps': [0.3, 0.2, 0.4], 'alpha': [0.1]}}
                     ]

    # get training data
    data_list = "../common/dataset/MNIST/train"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)
    train_images = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        train_images.append(images)
    train_images = np.concatenate(train_images, axis=0)

    # initialize fuzz test with training dataset
    model_coverage_test = ModelCoverageMetrics(model, 10, 1000, train_images)

    # fuzz test with original test data
    # get test data
    data_list = "../common/dataset/MNIST/test"
    batch_size = 32
    init_samples = 5000
    max_iters = 50000
    mutate_num_per_seed = 10
    ds = generate_mnist_dataset(data_list, batch_size, num_samples=init_samples,
                                sparse=False)
    test_images = []
    test_labels = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
    test_images = np.concatenate(test_images, axis=0)
    test_labels = np.concatenate(test_labels, axis=0)
    initial_seeds = []

    # make initial seeds
    for img, label in zip(test_images, test_labels):
        initial_seeds.append([img, label])

    model_coverage_test.calculate_coverage(
        np.array(test_images[:100]).astype(np.float32))
    LOGGER.info(TAG, 'KMNC of test dataset before fuzzing is : %s',
                model_coverage_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of test dataset before fuzzing is : %s',
                model_coverage_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of test dataset before fuzzing is : %s',
                model_coverage_test.get_snac())

    model_fuzz_test = Fuzzer(model, train_images, 10, 1000)
    gen_samples, gt, _, _, metrics = model_fuzz_test.fuzzing(mutate_config,
                                                             initial_seeds,
                                                             eval_metrics='auto',
                                                             max_iters=max_iters,
                                                             mutate_num_per_seed=mutate_num_per_seed)

    if metrics:
        for key in metrics:
            LOGGER.info(TAG, key + ': %s', metrics[key])

    def split_dataset(image, label, proportion):
        """
        Split the generated fuzz data into train and test set.
        """
        indices = np.arange(len(image))
        random.shuffle(indices)
        train_length = int(len(image) * proportion)
        train_image = [image[i] for i in indices[:train_length]]
        train_label = [label[i] for i in indices[:train_length]]
        test_image = [image[i] for i in indices[train_length:]]
        test_label = [label[i] for i in indices[train_length:]]
        return train_image, train_label, test_image, test_label

    train_image, train_label, test_image, test_label = split_dataset(
        gen_samples, gt, 0.7)

    # load model B and test it on the test set
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/lenet_m2-10_1250.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model_b = Model(net)
    pred_b = model_b.predict(Tensor(test_image, dtype=mindspore.float32)).asnumpy()
    acc_b = np.sum(np.argmax(pred_b, axis=1) == np.argmax(test_label, axis=1)) / len(test_label)
    print('Accuracy of model B on test set is ', acc_b)

    # enhance model robustness
    lr = 0.001
    momentum = 0.9
    loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
    optimizer = Momentum(net.trainable_params(), lr, momentum)

    adv_defense = AdversarialDefense(net, loss_fn, optimizer)
    adv_defense.batch_defense(np.array(train_image).astype(np.float32),
                              np.argmax(train_label, axis=1).astype(np.int32))
    preds_en = net(Tensor(test_image, dtype=mindspore.float32)).asnumpy()
    acc_en = np.sum(np.argmax(preds_en, axis=1) == np.argmax(test_label, axis=1)) / len(test_label)
    print('Accuracy of enhanced model on test set is ', acc_en)