Пример #1
0
def test_cell_list():
    """Build a Net3 cell and run a single graph-mode prediction on random data."""
    sample = np.random.randn(2, 3, 4, 5).astype(np.float32)
    sample_tensor = Tensor(sample)
    net = Net3()
    context.set_context(mode=context.GRAPH_MODE)
    Model(net).predict(sample_tensor)
Пример #2
0
def function_access_base(number):
    """ function_access_base

    Build the access network selected by *number* and run one
    graph-mode prediction on random input.

    Args:
        number (int): network selector; only 2 is currently supported.

    Raises:
        ValueError: if *number* has no matching network. (Previously the
            function fell through with ``net`` unbound and raised a
            confusing NameError at ``Model(net)``.)
    """
    input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me = Tensor(input_np)
    if number == 2:
        net = access2_net(number)
    else:
        raise ValueError("unsupported access network selector: %r" % (number,))
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(net)
    model.predict(input_me)
Пример #3
0
def logical_operator_base(symbol):
    """ logical_operator_base

    Map *symbol* ("and" / "or") to its numeric code, build a
    logical_Net with it and run one graph-mode prediction.
    """
    sample = np.random.randn(2, 3, 4, 5).astype(np.float32)
    sample_tensor = Tensor(sample)
    code = {"and": 1, "or": 2}[symbol]
    net = logical_Net(code)
    context.set_context(mode=context.GRAPH_MODE)
    Model(net).predict(sample_tensor)
Пример #4
0
def test_model_build_abnormal_string():
    """ test_model_build_abnormal_string

    Predicting on a plain string must raise ValueError.
    """
    net = nn.ReLU()
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(net)
    raised = False
    try:
        model.predict('aaa')
    except ValueError as e:
        log.error("Find value error: %r ", e)
        raised = True
    finally:
        # The predict call above is expected to have raised.
        assert raised
Пример #5
0
def test_edge_case():
    """Layout inference / predict must raise RuntimeError in these modes.

    NOTE(review): the test mutates global auto-parallel context state in
    sequence; the order of the set_* calls below is significant.
    """
    context.set_context(mode=context.GRAPH_MODE)
    inputs = Tensor(np.ones([32, 48]).astype(np.float32))
    net = Net()
    model = Model(net)
    # Expected to fail before any parallel mode is configured.
    with pytest.raises(RuntimeError):
        model.infer_predict_layout(inputs)
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    # Still expected to fail under semi_auto_parallel in this setup.
    with pytest.raises(RuntimeError):
        model.infer_predict_layout(inputs)
    context.set_auto_parallel_context(full_batch=True,
                                      enable_parallel_optimizer=True)
    # predict itself must also fail with this context combination.
    with pytest.raises(RuntimeError):
        model.predict(inputs)
Пример #6
0
def test_single_input():
    """ test_single_input

    Predict on one random image-shaped tensor; result must not be None.
    """
    image = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(Net())
    result = model.predict(image)
    assert result is not None
Пример #7
0
def predict_checke_param(in_str):
    """ predict_checke_param

    Parse six whitespace-separated integers from *in_str* — a random
    range (low, high) followed by a 4-D shape — generate random integer
    data of that shape and run it through a LeNet5 model in graph mode.

    Args:
        in_str (str): e.g. "0 255 1 3 32 32".

    Raises:
        ValueError: if *in_str* does not contain exactly six integer tokens.
    """
    net = LeNet5()  # neural network
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(net)

    tokens = in_str.strip().split()
    if len(tokens) != 6:
        raise ValueError("expected 6 integers, got %d tokens" % len(tokens))
    # Single unpack replaces six repetitive int() assignments.
    a1, a2, b1, b2, b3, b4 = (int(tok) for tok in tokens)

    nd_data = np.random.randint(a1, a2, [b1, b2, b3, b4])
    input_data = Tensor(nd_data, mindspore.float32)
    model.predict(input_data)
Пример #8
0
def test_multiple_argument():
    """ test_multiple_argument

    Feed both data and label tensors through predict; result must not be None.
    """
    image = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
    label = Tensor(np.random.randint(0, 3, [1, 3]).astype(np.float32))
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(LossNet())
    result = model.predict(image, label)
    assert result is not None
Пример #9
0
def run_test(netclass, count):
    """Compare eager construct() output shape with graph predict() shape."""
    context.set_context(mode=context.GRAPH_MODE)
    net = netclass()
    model = Model(net)
    for _ in range(count):
        sample = np.random.randn(2, 3).astype(np.float32)
        eager_out = net.construct(sample)           # run python
        graph_out = model.predict(Tensor(sample))   # run graph
        assert np.shape(eager_out) == np.shape(graph_out.asnumpy())
Пример #10
0
def arithmetic_operator_base(symbol):
    """ arithmetic_operator_base

    Map an arithmetic operator *symbol* to its numeric code, build an
    arithmetic_Net with it and run one graph-mode prediction.
    """
    sample = np.random.randn(2, 3, 4, 5).astype(np.float32)
    sample_tensor = Tensor(sample)
    # Renamed from the misleading "logical_operator": this table holds
    # arithmetic operator codes (plus "not").
    operator_codes = {
        "++": 1,
        "--": 2,
        "+": 3,
        "-": 4,
        "*": 5,
        "/": 6,
        "%": 7,
        "not": 8,
    }
    net = arithmetic_Net(operator_codes[symbol])
    context.set_context(mode=context.GRAPH_MODE)
    Model(net).predict(sample_tensor)
Пример #11
0
def test_distribute_predict_auto_parallel():
    """Infer the predict layout and run predict under auto_parallel mode."""
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
    context.set_auto_parallel_context(parallel_mode="auto_parallel",
                                      device_num=8, full_batch=True)
    batch = Tensor(np.ones([32, 64, 128]).astype(np.float32))
    net = Net()
    model = Model(net)
    layout = model.infer_predict_layout(batch)
    result = model.predict(batch)
    # Restore global parallel context so later tests are unaffected.
    context.reset_auto_parallel_context()
    return layout, result
Пример #12
0
def run_test(netclass, count, dev):
    """Check eager and graph execution agree numerically on device *dev*."""
    context.set_context(mode=context.GRAPH_MODE, device_target=dev)
    net = netclass()
    model = Model(net)
    for _ in range(count):
        sample = np.random.randn(2, 3).astype(np.float32)
        eager_out = net.construct(sample)           # run python
        graph_out = model.predict(Tensor(sample))   # run graph
        np.testing.assert_array_almost_equal(eager_out,
                                             graph_out.asnumpy(),
                                             decimal=3)
Пример #13
0
def test_inference():
    """distributed inference after distributed training

    Loads a sharded checkpoint according to the inferred predict layout
    and runs prediction under semi_auto_parallel.
    """
    context.set_context(mode=context.GRAPH_MODE)
    init(backend_name="hccl")
    context.set_auto_parallel_context(full_batch=True, parallel_mode="semi_auto_parallel",
                                      strategy_ckpt_load_file="./train_strategy.ckpt", device_num=8)

    predict_data = create_predict_data()
    network = Net(matmul_size=(96, 16))
    model = Model(network)
    predict_layout = model.infer_predict_layout(Tensor(predict_data))
    ckpt_file_list = create_ckpt_file_list()
    load_distributed_checkpoint(network, ckpt_file_list, predict_layout)
    # FIX: predict() now receives the same Tensor-wrapped data that
    # infer_predict_layout() saw; previously raw predict_data was passed.
    predict_result = model.predict(Tensor(predict_data))
    print(predict_result)
Пример #14
0
def test_different_args_run():
    """ test_different_args_run

    Two different random inputs must produce different predictions.
    """
    arr_a = np.random.randn(2, 3, 4, 5).astype(np.float32)
    tensor_a = Tensor(arr_a)
    arr_b = np.random.randn(2, 3, 4, 5).astype(np.float32)
    tensor_b = Tensor(arr_b)

    net = Net2()
    # NOTE(review): "predit" looks like a typo for "predict", but it is a
    # flag key consumed by add_flags — kept exactly as-is.
    net = add_flags(net, predit=True)
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(net)
    out_a = model.predict(tensor_a).asnumpy()
    out_b = model.predict(tensor_b).asnumpy()
    print(arr_a)
    print(arr_b)
    print(out_a)
    print(out_b)
    assert not np.allclose(out_a, out_b, 0.01, 0.01)
Пример #15
0
def test_net(data_dir, seg_dir, ckpt_path, config=None):
    """Evaluate a UNet3d checkpoint with sliding-window inference.

    Args:
        data_dir: path to the evaluation images.
        seg_dir: path to the ground-truth segmentations.
        ckpt_path: checkpoint file loaded into the network.
        config: passed through to the dataset/model; must expose
            roi_size, overlap, batch_size and num_classes.

    Prints the per-batch Dice score and the average Dice over the dataset.
    """
    eval_dataset = create_dataset(data_path=data_dir,
                                  seg_path=seg_dir,
                                  config=config,
                                  is_training=False)
    eval_data_size = eval_dataset.get_dataset_size()
    print("train dataset length is:", eval_data_size)

    network = UNet3d(config=config)
    network.set_train(False)  # inference mode
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(network, param_dict)
    model = Model(network)
    index = 0
    total_dice = 0
    for batch in eval_dataset.create_dict_iterator(num_epochs=1,
                                                   output_numpy=True):
        image = batch["image"]
        seg = batch["seg"]
        print("current image shape is {}".format(image.shape), flush=True)
        # Split the volume into overlapping ROI-sized windows.
        sliding_window_list, slice_list = create_sliding_window(
            image, config.roi_size, config.overlap)
        image_size = (config.batch_size, config.num_classes) + image.shape[2:]
        output_image = np.zeros(image_size, np.float32)
        count_map = np.zeros(image_size, np.float32)
        importance_map = np.ones(config.roi_size, np.float32)
        for window, slice_ in zip(sliding_window_list, slice_list):
            window_image = Tensor(window, mstype.float32)
            pred_probs = model.predict(window_image)
            # Accumulate window predictions; count_map records how many
            # windows covered each voxel so overlaps can be averaged out.
            output_image[slice_] += pred_probs.asnumpy()
            count_map[slice_] += importance_map
        output_image = output_image / count_map
        dice, _ = CalculateDice(output_image, seg)
        print("The {} batch dice is {}".format(index, dice), flush=True)
        total_dice += dice
        index = index + 1
    avg_dice = total_dice / eval_data_size
    print(
        "**********************End Eval***************************************"
    )
    print("eval average dice is {}".format(avg_dice))
Пример #16
0
def test_lbfgs_attack():
    """
    LBFGS-Attack test for CPU device.

    Loads a trained LeNet5, measures clean accuracy on a few MNIST
    batches, runs a (targeted) LBFGS attack, then logs adversarial
    accuracy and AttackEvaluate metrics.
    """
    # upload trained network
    ckpt_path = '../../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "../../../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator(output_numpy=True):
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking: pick a random target class different from the true one
    is_targeted = True
    if is_targeted:
        targeted_labels = np.random.randint(0, 10, size=len(true_labels)).astype(np.int32)
        for i, true_l in enumerate(true_labels):
            if targeted_labels[i] == true_l:
                targeted_labels[i] = (targeted_labels[i] + 1) % 10
    else:
        targeted_labels = true_labels.astype(np.int32)
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = LBFGS(net, is_targeted=is_targeted, loss_fn=loss)
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     targeted_labels,
                                     batch_size=batch_size)
    stop_time = time.perf_counter()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)

    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
                accuracy_adv)
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
                                     np.eye(10)[true_labels],
                                     adv_data.transpose(0, 2, 3, 1),
                                     pred_logits_adv,
                                     targeted=is_targeted,
                                     target_label=targeted_labels)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
                     'samples and adversarial samples are: %s',
                attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original '
                     'samples and adversarial samples are: %s',
                attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s',
                (stop_time - start_time)/(batch_num*batch_size))
Пример #17
0
        self.fc2 = nn.Dense(hidden_size, 1)
        self.sig = ops.Sigmoid()

    def construct(self, x):
        """Forward pass: dense -> sigmoid -> dense."""
        hidden = self.sig(self.fc1(x))
        return self.fc2(hidden)


# Train the two-input network with momentum SGD on an MSE objective.
m = Net(HIDDEN_SIZE)
optim = nn.Momentum(m.trainable_params(), 0.05, 0.9)

loss = nn.MSELoss()

loss_cb = LossMonitor()

model = Model(m, loss, optim, {'acc': Accuracy()})

time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())

model.train(ITERATIONS,
            ds_train,
            callbacks=[time_cb, loss_cb],
            dataset_sink_mode=False)

# Print predictions for all four boolean input pairs — presumably a
# two-input truth-table task (e.g. XOR); verify against ds_train.
print("TF", model.predict(Tensor([[1, 0]], mindspore.float32)).asnumpy())
print("FF", model.predict(Tensor([[0, 0]], mindspore.float32)).asnumpy())
print("TT", model.predict(Tensor([[1, 1]], mindspore.float32)).asnumpy())
print("FT", model.predict(Tensor([[0, 1]], mindspore.float32)).asnumpy())
Пример #18
0
def test_fast_gradient_sign_method():
    """
    FGSM-Attack test

    Loads a trained LeNet5, measures clean accuracy on a few MNIST
    batches (one-hot labels), runs FGSM, saves the adversarial data and
    logs adversarial accuracy and AttackEvaluate metrics.
    """
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    # Labels are one-hot here (sparse=False), hence the argmax.
    true_labels = np.argmax(np.concatenate(test_labels), axis=1)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    attack = FastGradientSignMethod(net, eps=0.3)
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     np.concatenate(test_labels),
                                     batch_size=32)
    stop_time = time.perf_counter()
    np.save('./adv_data', adv_data)
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
                accuracy_adv)
    attack_evaluate = AttackEvaluate(
        np.concatenate(test_images).transpose(0, 2, 3, 1),
        np.concatenate(test_labels), adv_data.transpose(0, 2, 3, 1),
        pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(
        TAG, 'The average distance (l0, l2, linf) between original '
        'samples and adversarial samples are: %s',
        attack_evaluate.avg_lp_distance())
    LOGGER.info(
        TAG, 'The average structural similarity between original '
        'samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s',
                (stop_time - start_time) / (batch_num * batch_size))
Пример #19
0
def test_momentum_diverse_input_iterative_method():
    """
    M-DI2-FGSM Attack Test for CPU device.

    Loads a trained LeNet5, measures clean accuracy, runs the momentum
    diverse-input iterative attack and logs adversarial accuracy plus
    AttackEvaluate metrics.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 32  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    # NOTE(review): is_grad was removed from newer MindSpore versions of
    # this loss — kept as-is to match the API version this file targets.
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    attack = MomentumDiverseInputIterativeMethod(net, loss_fn=loss)
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     true_labels,
                                     batch_size=32)
    stop_time = time.perf_counter()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
                accuracy_adv)
    attack_evaluate = AttackEvaluate(
        np.concatenate(test_images).transpose(0, 2, 3, 1),
        np.eye(10)[true_labels], adv_data.transpose(0, 2, 3, 1),
        pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(
        TAG, 'The average distance (l0, l2, linf) between original '
        'samples and adversarial samples are: %s',
        attack_evaluate.avg_lp_distance())
    LOGGER.info(
        TAG, 'The average structural similarity between original '
        'samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s',
                (stop_time - start_time) / (batch_num * batch_size))
Пример #20
0
         np0_image = Image.open(image_path).convert("RGB")
         
         np1_image = np.array(np0_image)
         #np1_image = np.transpose(np0_image,(2,0,1))
         #print("image: {}".format(np1_image))
         
         np2_image = np.transpose(np1_image,(2,0,1))
         #np2_image = np.array(np1_image)
         #print("shape: {}".format(np2_image.shape), ", dtype: {}".format(np2_image.dtype))
         
         np_image = np.array([np2_image], dtype=np.float32)
         #print("shape: {}".format(np_image.shape), ", dtype: {}".format(np_image.dtype))
         
         # 图像处理
         input_data = Tensor(np_image,ms.float32)
         pred = model.predict(input_data)
         pred = list(pred)
         #print("label: {}".format( pred.argmax(axis=1) ) )
         
         label=pred.index(max(pred))
         print("label: {}".format( label ) )
         
         labels.append(label)
         images.append(np_image)
         count+=1
         #if count==10:
         #    break
 
 print("end predict")
 with open("result.csv", mode='w', newline='') as csv_p:
     fieldnames = ['label','shot']
Пример #21
0
def test_jsma_attack():
    """
    JSMA-Attack test

    Loads a trained LeNet5, measures clean accuracy, runs a targeted
    JSMA attack toward random non-true target classes and logs
    adversarial accuracy plus AttackEvaluate metrics.
    """
    # upload trained network
    ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_name)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "./MNIST_unzip/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator():
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    # Ensure every target class differs from the true class.
    targeted_labels = np.random.randint(0, 10, size=len(true_labels))
    for i in range(len(true_labels)):
        if targeted_labels[i] == true_labels[i]:
            targeted_labels[i] = (targeted_labels[i] + 1) % 10
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

    # attacking
    classes = 10
    attack = JSMAAttack(net, classes)
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     targeted_labels,
                                     batch_size=32)
    stop_time = time.perf_counter()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    # FIX: local variable was misspelled "pred_lables_adv".
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
                accuracy_adv)
    test_labels = np.eye(10)[np.concatenate(test_labels)]
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(
        0, 2, 3, 1),
                                     test_labels,
                                     adv_data.transpose(0, 2, 3, 1),
                                     pred_logits_adv,
                                     targeted=True,
                                     target_label=targeted_labels)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(
        TAG, 'The average distance (l0, l2, linf) between original '
        'samples and adversarial samples are: %s',
        attack_evaluate.avg_lp_distance())
    LOGGER.info(
        TAG, 'The average structural similarity between original '
        'samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s',
                (stop_time - start_time) / (batch_num * batch_size))
def test_deepfool_attack():
    """
    DeepFool-Attack test

    Loads a trained LeNet5, measures clean accuracy, runs an L2-norm
    DeepFool attack and logs adversarial accuracy plus AttackEvaluate
    metrics.
    """
    # upload trained network
    ckpt_path = '../../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "../../../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size=batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator(output_numpy=True):
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    classes = 10
    attack = DeepFool(net, classes, norm_level=2, bounds=(0.0, 1.0))
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     np.concatenate(test_labels),
                                     batch_size=32)
    stop_time = time.perf_counter()
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
                accuracy_adv)
    test_labels = np.eye(10)[np.concatenate(test_labels)]
    attack_evaluate = AttackEvaluate(
        np.concatenate(test_images).transpose(0, 2, 3, 1), test_labels,
        adv_data.transpose(0, 2, 3, 1), pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(
        TAG, 'The average distance (l0, l2, linf) between original '
        'samples and adversarial samples are: %s',
        attack_evaluate.avg_lp_distance())
    LOGGER.info(
        TAG, 'The average structural similarity between original '
        'samples and adversarial samples are: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average costing time is %s',
                (stop_time - start_time) / (batch_num * batch_size))
Пример #23
0
# Restore a trained LeNet5 and evaluate FGSM adversarial accuracy on MNIST.
net = LeNet5()
mnist_path = "./datasets/MNIST_Data/"
param_dict = load_checkpoint("checkpoint_lenet-1_1875.ckpt")
load_param_into_net(net, param_dict)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
net_opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
model = Model(net, net_loss, net_opt, metrics={"Accuracy": nn.Accuracy()})

ds_test = create_dataset(os.path.join(mnist_path, "test"),
                         batch_size=32).create_dict_iterator(output_numpy=True)

# FIX: these accumulators were appended to without ever being
# initialized, which raises NameError in a standalone run.
test_images = []
test_labels = []
predict_labels = []

for data in ds_test:
    images = data['image'].astype(np.float32)
    labels = data['label']
    test_images.append(images)
    test_labels.append(labels)
    pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
    predict_labels.append(pred_labels)

test_images = np.concatenate(test_images)
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)

# Generate adversarial samples and measure accuracy on them.
fgsm = FastGradientSignMethod(net, eps=args.eps, loss_fn=net_loss)
advs = fgsm.batch_generate(test_images, true_labels, batch_size=32)

adv_predicts = model.predict(Tensor(advs)).asnumpy()
adv_predicts = np.argmax(adv_predicts, axis=1)
accuracy = np.mean(np.equal(adv_predicts, true_labels))
print(accuracy)
Пример #24
0
def example_lenet_mnist_fuzzing():
    """
    An example of fuzz testing and then enhance the non-robustness model.

    Steps: load model A, compute baseline coverage on MNIST test data,
    fuzz to generate new samples, evaluate model B on the generated test
    split, then adversarially fine-tune model B on the train split.
    """
    # upload trained network
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/lenet_m1-10_1250.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model = Model(net)
    mutate_config = [{'method': 'Blur',
                      'params': {'auto_param': [True]}},
                     {'method': 'Contrast',
                      'params': {'auto_param': [True]}},
                     {'method': 'Translate',
                      'params': {'auto_param': [True]}},
                     {'method': 'Brightness',
                      'params': {'auto_param': [True]}},
                     {'method': 'Noise',
                      'params': {'auto_param': [True]}},
                     {'method': 'Scale',
                      'params': {'auto_param': [True]}},
                     {'method': 'Shear',
                      'params': {'auto_param': [True]}},
                     {'method': 'FGSM',
                      'params': {'eps': [0.3, 0.2, 0.4], 'alpha': [0.1]}}
                     ]

    # get training data
    data_list = "../common/dataset/MNIST/train"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)
    train_images = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        train_images.append(images)
    train_images = np.concatenate(train_images, axis=0)

    # initialize fuzz test with training dataset
    model_coverage_test = ModelCoverageMetrics(model, 10, 1000, train_images)

    # fuzz test with original test data
    # get test data
    data_list = "../common/dataset/MNIST/test"
    batch_size = 32
    init_samples = 5000
    max_iters = 50000
    mutate_num_per_seed = 10
    ds = generate_mnist_dataset(data_list, batch_size, num_samples=init_samples,
                                sparse=False)
    test_images = []
    test_labels = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
    test_images = np.concatenate(test_images, axis=0)
    test_labels = np.concatenate(test_labels, axis=0)
    initial_seeds = []

    # make initial seeds
    for img, label in zip(test_images, test_labels):
        initial_seeds.append([img, label])

    model_coverage_test.calculate_coverage(
        np.array(test_images[:100]).astype(np.float32))
    LOGGER.info(TAG, 'KMNC of test dataset before fuzzing is : %s',
                model_coverage_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of test dataset before fuzzing is : %s',
                model_coverage_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of test dataset before fuzzing is : %s',
                model_coverage_test.get_snac())

    model_fuzz_test = Fuzzer(model, train_images, 10, 1000)
    gen_samples, gt, _, _, metrics = model_fuzz_test.fuzzing(mutate_config,
                                                             initial_seeds,
                                                             eval_metrics='auto',
                                                             max_iters=max_iters,
                                                             mutate_num_per_seed=mutate_num_per_seed)

    if metrics:
        for key in metrics:
            LOGGER.info(TAG, key + ': %s', metrics[key])

    def split_dataset(image, label, proportion):
        """
        Split the generated fuzz data into disjoint train and test sets.
        """
        indices = np.arange(len(image))
        random.shuffle(indices)
        train_length = int(len(image) * proportion)
        train_image = [image[i] for i in indices[:train_length]]
        train_label = [label[i] for i in indices[:train_length]]
        # BUG FIX: the test split previously reused indices[:train_length],
        # which made the "test" set identical to the train set.
        test_image = [image[i] for i in indices[train_length:]]
        test_label = [label[i] for i in indices[train_length:]]
        return train_image, train_label, test_image, test_label

    train_image, train_label, test_image, test_label = split_dataset(
        gen_samples, gt, 0.7)

    # load model B and test it on the test set
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/lenet_m2-10_1250.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model_b = Model(net)
    pred_b = model_b.predict(Tensor(test_image, dtype=mindspore.float32)).asnumpy()
    acc_b = np.sum(np.argmax(pred_b, axis=1) == np.argmax(test_label, axis=1)) / len(test_label)
    print('Accuracy of model B on test set is ', acc_b)

    # enhense model robustness
    lr = 0.001
    momentum = 0.9
    # BUG FIX: keyword was "Sparse" (capital S); Python kwargs are
    # case-sensitive and this loss is constructed with sparse=... elsewhere
    # in this file.
    loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
    optimizer = Momentum(net.trainable_params(), lr, momentum)

    adv_defense = AdversarialDefense(net, loss_fn, optimizer)
    adv_defense.batch_defense(np.array(train_image).astype(np.float32),
                              np.argmax(train_label, axis=1).astype(np.int32))
    preds_en = net(Tensor(test_image, dtype=mindspore.float32)).asnumpy()
    acc_en = np.sum(np.argmax(preds_en, axis=1) == np.argmax(test_label, axis=1)) / len(test_label)
    print('Accuracy of enhensed model on test set is ', acc_en)