Example #1
def main(use_cuda):
    """
    AdvBox demo which demonstrates how to use AdvBox.
    """
    main_prog = fluid.default_main_program()
    output_target = './datasets/output_image/'
    if not os.path.exists(output_target):
        os.makedirs(output_target)
    IMG_NAME = 'img'
    LABEL_NAME = 'label'
    global_id = 0

    img = fluid.layers.data(name=IMG_NAME,
                            shape=[3, 224, 224],
                            dtype='float32')
    label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
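    # Trainable perturbation parameter, initialized near zero; Adam later
    # updates only this variable (see parameter_list below).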
    noise = fluid.layers.create_parameter(
        name="noise",
        shape=[batch_size, 3, 224, 224],
        dtype='float32',
        default_initializer=fluid.initializer.Constant(0.0000001))

    true_image = noise + img

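    # Split the perturbed image into its R, G and B channels so each channel
    # can be clipped to its own valid range.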
    r_image = fluid.layers.crop(true_image,
                                shape=[batch_size, 1, 224, 224],
                                offsets=[0, 0, 0, 0],
                                name='r_image')
    g_image = fluid.layers.crop(true_image,
                                shape=[batch_size, 1, 224, 224],
                                offsets=[0, 1, 0, 0],
                                name='g_image')
    b_image = fluid.layers.crop(true_image,
                                shape=[batch_size, 1, 224, 224],
                                offsets=[0, 2, 0, 0],
                                name='b_image')

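    # Per-channel bounds: map the valid pixel range [0, 1] through the
    # ImageNet mean/std normalization.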
    max_mean = [0.485, 0.456, 0.406]
    max_std = [0.229, 0.224, 0.225]
    r_max = (1 - max_mean[0]) / max_std[0]
    g_max = (1 - max_mean[1]) / max_std[1]
    b_max = (1 - max_mean[2]) / max_std[2]

    r_min = (0 - max_mean[0]) / max_std[0]
    g_min = (0 - max_mean[1]) / max_std[1]
    b_min = (0 - max_mean[2]) / max_std[2]
    r_image = fluid.layers.clip(x=r_image, min=r_min, max=r_max)
    g_image = fluid.layers.clip(x=g_image, min=g_min, max=g_max)
    b_image = fluid.layers.clip(x=b_image, min=b_min, max=b_max)

    true_image = fluid.layers.concat([r_image, g_image, b_image], axis=1)

    loss, outs = create_net(true_image, label)

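    # Perturbation penalty: mean per-pixel L2 norm of the noise, measured in
    # de-normalized 0-255 pixel values.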
    std = fluid.layers.assign(
        np.array([[[0.229]], [[0.224]], [[0.225]]]).astype('float32'))

    square = fluid.layers.square(noise * std * 255.0)
    # avg l2 norm
    loss2 = fluid.layers.reduce_sum(square, dim=1)
    loss2 = fluid.layers.sqrt(loss2)
    loss2 = fluid.layers.reduce_mean(loss2)

    #avg mse
    # loss2 = fluid.layers.reduce_mean(square)

    loss = loss + 0.005 * loss2

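    # Prepare the program, keep a test-only clone, and attach Adam so that
    # only the noise parameter is optimized.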
    init_prog(main_prog)
    test_prog = main_prog.clone()
    lr = fluid.layers.create_global_var(shape=[1],
                                        value=0.02,
                                        dtype='float32',
                                        persistable=True,
                                        name='learning_rate_0')

    opt = fluid.optimizer.Adam(learning_rate=lr)
    opt.minimize(loss, parameter_list=[noise.name])

    # Select CPU or GPU resources according to the configuration
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    test_reader = paddle.batch(test_set(), batch_size=batch_size)

    load_params(exe)

    fail_count = 0

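    # Locate Adam's internal state in the global scope (learning rate, moment
    # estimates and beta power accumulators) so it can be reset per batch.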
    for block in main_prog.blocks:
        for var in block.vars.keys():
            if 'learning_rate' in var:
                pd_lr = fluid.global_scope().find_var(var)
                print(var)
            if 'beta1_pow_acc' in var:
                pd_noise_beta1 = fluid.global_scope().find_var(var)
                print(var)
            if 'moment1' in var:
                pd_noise_mom1 = fluid.global_scope().find_var(var)
                print(var)
            if 'beta2_pow_acc' in var:
                pd_noise_beta2 = fluid.global_scope().find_var(var)
                print(var)
            if 'moment2' in var:
                pd_noise_mom2 = fluid.global_scope().find_var(var)
                print(var)
    print(np.array(pd_lr.get_tensor()))
    for train_id, data in enumerate(test_reader()):
        images = []
        labels = []
        filenames = []
        for i in range(batch_size):
            images.append(data[i][0][0])
            labels.append([data[i][1]])
            filenames.append(data[i][2])
            # image = data[0][0]
            # label = data[0][1]
            # label = np.array([[label]])
            # filename = data[0][2]
        images = np.array(images)
        labels = np.array(labels)
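        # Reset the noise parameter to its small initial value so each batch
        # starts the attack from scratch.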
        for block in main_prog.blocks:
            for param in block.all_parameters():
                if param.name == 'noise':
                    pd_var = fluid.global_scope().find_var(param.name)
                    pd_param = pd_var.get_tensor()
                    print("load: {}, shape: {}".format(param.name,
                                                       param.shape))
                    print("Before setting the numpy array value: {}".format(
                        np.array(pd_param).ravel()[:5]))
                    noise_tensor = np.zeros(param.shape).astype('float32')
                    noise_tensor[:] = 1e-7
                    pd_param.set(noise_tensor, place)
                    print("After setting the numpy array value: {}".format(
                        np.array(pd_param).ravel()[:5]))
        # pd_lr.get_tensor().set(np.array([0.02]).astype('float32'), place)
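        # Re-initialize Adam's beta power accumulators and moment estimates
        # between batches.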
        if batch_size > 1:
            pd_noise_beta1.get_tensor().set(
                np.array([0.9]).astype('float32'), place)
            pd_noise_beta2.get_tensor().set(
                np.array([0.999]).astype('float32'), place)
            pd_noise_mom1.get_tensor().set(
                np.zeros(shape=[batch_size, 3, 224, 224]).astype('float32'),
                place)
            pd_noise_mom2.get_tensor().set(
                np.zeros(shape=[batch_size, 3, 224, 224]).astype('float32'),
                place)

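        # Recover the original uint8 pixels by undoing the ImageNet
        # normalization; they serve as the distortion reference.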
        i = 0
        fetch_list = [true_image, lr, loss, loss2, noise]
        mean_np = np.array([[[[0.485]], [[0.456]],
                             [[0.406]]]]).astype('float32')
        std_np = np.array([[[[0.229]], [[0.224]],
                            [[0.225]]]]).astype('float32')
        ori_img = np.round((images * std_np + mean_np) * 255.0)
        ori_img = np.clip(ori_img, 0, 255).astype('uint8')
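        # Attack loop: one optimization step on the noise per iteration,
        # followed by a fresh evaluation of every target model.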
        while True:
            if i == 0:
                test_vars = exe.run(program=test_prog,
                                    feed={
                                        'img': images,
                                        'label': labels
                                    },
                                    fetch_list=outs)

                for m in range(batch_size):
                    msg = 'First step test network,id:{},'.format(global_id +
                                                                  1)
                    global_id += 1
                    adv_labels = []
                    for j in range(len(outs)):
                        o = test_vars[j][m]
                        adv_label = np.argmax(o)
                        adv_labels.append(adv_label)
                        msg += 'adv{}:%d,'.format(j + 1)
                    print(msg % (*adv_labels, ))

            train_vars = exe.run(program=fluid.default_main_program(),
                                 feed={
                                     'img': images,
                                     'label': labels
                                 },
                                 fetch_list=fetch_list)
            n = train_vars[-1]
            l2 = train_vars[-2]
            l1 = train_vars[-3]
            lr1 = train_vars[-4]
            tr_img = train_vars[-5]

            adv_img = n + images
            adv_img = np.round((adv_img * std_np + mean_np) * 255.0)
            adv_img = np.clip(adv_img, 0, 255).astype('uint8')

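            # Measure the distortion in pixel space: per-image average MSE and
            # mean per-pixel L2 norm.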
            diff = adv_img.astype('float32') - ori_img.astype('float32')
            avg_mse = diff * diff
            # avg l2 norm
            l2_norm = np.sum(avg_mse, axis=1)
            l2_norm = np.sqrt(l2_norm)
            l2_norm = np.mean(l2_norm, axis=(1, 2))
            # avg mse
            avg_mse = np.mean(avg_mse, axis=(1, 2, 3))

            test_vars = exe.run(program=test_prog,
                                feed={
                                    'img': images,
                                    'label': labels
                                },
                                fetch_list=outs)
            successful = batch_size * len(outs)
            for m in range(batch_size):
                msg = 'batch:%d,id:{},lr:%f,loss1:%f,loss2:%f,avg_mse:%f,l2_norm:%f,'.format(
                    train_id * batch_size + m + 1)
                adv_labels = []
                for j in range(len(outs)):
                    o = test_vars[j][m]
                    adv_label = np.argmax(o)
                    adv_labels.append(adv_label)
                    msg += 'adv{}:%d,'.format(j + 1)
                print(msg %
                      (i, lr1, l1, l2, avg_mse[m], l2_norm[m], *adv_labels))

                for adv_label in adv_labels:
                    if adv_label == labels[m]:
                        successful -= 1

            i += 1
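            # Stop once at most one prediction still matches the true label
            # and the mean L2 norm is below 1.0, or after 3000 iterations.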
            if (successful >= batch_size * len(outs) - 1
                    and np.mean(l2_norm) < 1.0) or i == 3000:
                if successful >= batch_size * len(outs) - 1:
                    print('attack successful')
                else:
                    print('attack failed')
                    fail_count += 1
                break

        print("failed:%d" % (fail_count, ))

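        # Map the clipped uint8 adversarial image back into normalized space,
        # then convert and save each sample as a PNG.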
        adv_img = adv_img.astype('float32') / 255.0
        adv_img = adv_img - mean_np
        adv_img = adv_img / std_np

        for m in range(batch_size):
            adv_image = tensor2img(adv_img[m][np.newaxis, :, :, :])
            ori_image = tensor2img(images[m][np.newaxis, :, :, :])

            print('id:{},mse:{}'.format(train_id * batch_size + m + 1,
                                        call_avg_mse_np(adv_image, ori_image)))
            save_adv_image(
                adv_image,
                os.path.join(output_target,
                             filenames[m].split('.')[0] + '.png'))
    print("attack over ,failed:%d" % (fail_count, ))
Example #2
File: main.py Project: cucJ2014/AI-Studio
    noise_layer.stop_gradient = False
    raw_input = input_layer + noise_layer
    input_argued = input_diversity(raw_input, diversity_iter > 1)
    out_logits = 0
    # model definition
    for i in range(len(modelnames)):
        model = models.__dict__[modelnames[i]]()
        out_logits += model.net(input=input_argued,
                                class_dim=class_dim) * weights[i]
    out = fluid.layers.softmax(out_logits)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    # Load the pretrained model parameters
    fluid.io.load_persistables(exe, pretrained_model)
# Set the BN layer state of adv_program
init_prog(adv_program)
# Create an evaluation-mode clone for testing
eval_program = adv_program.clone(for_test=True)

# Define the gradients
with fluid.program_guard(adv_program):
    label = fluid.layers.data(name="label", shape=[1], dtype='int64')
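    # Attack objective: cross-entropy on the true label minus an MSE penalty
    # on the noise (against a zero tensor), scaled by reg_const.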
    loss_cls = fluid.layers.cross_entropy(input=out, label=label)
    loss_norm = fluid.layers.mse_loss(noise_layer, zero_input)
    loss = loss_cls - loss_norm * reg_const
    gradients = fluid.backward.gradients(targets=loss, inputs=[noise_layer])[0]


######Inference
def inference(img):
    fetch_list = [out.name]
Example #3
model6 = models.__dict__[model_name6]()
out_logit6 = model6.net(input=input_layer, class_dim=class_dim)
out6 = fluid.layers.softmax(out_logit6)

model7 = models.__dict__[model_name7]()
out_logit7 = model7.net(input=input_layer, class_dim=class_dim)
out7 = fluid.layers.softmax(out_logit7)

model8 = models.__dict__[model_name8]()
out_logit8 = model8.net(input=input_layer, class_dim=class_dim)
out8 = fluid.layers.softmax(out_logit8)

fluid.io.load_persistables(exe, pretrained_model, main_program=main_programs)
print('ok')
init_prog(main_programs)
eval_program = main_programs.clone(for_test=True)

label = fluid.layers.data(name="label", shape=[1], dtype='int64')
y = fluid.layers.data(name="y", shape=[8], dtype='int64')
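# Weighted ensemble: combine the eight models' logits using y as per-model
# weights (model 1 contributes only its first 121 classes).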
out_logits = (out_logit1[:, :121] * y[0] + out_logit2 * y[1] +
              out_logit3 * y[2] + out_logit4 * y[3] + out_logit5 * y[4] +
              out_logit6 * y[5] + out_logit7 * y[6] + out_logit8 * y[7]) / (
                  y[0] + y[1] + y[2] + y[3] + y[4] + y[5] + y[6] + y[7])
out = fluid.layers.softmax(out_logits)
loss = fluid.layers.cross_entropy(input=out, label=label)
gradients = fluid.gradients(targets=loss, inputs=[input_layer])[0]


def inference(img):
    result1, result2, result3, result4, result5, result6, result7, result8 = exe.run(
Example #4
    Inception_out = fluid.layers.softmax(Inception_out_logits)

    Mob_model = models.__dict__[model_name3]()  # Mobile
    Mob_out_logits = Mob_model.net(input=input_layer, class_dim=class_dim)
    Mob_out = fluid.layers.softmax(Mob_out_logits)

    # place = fluid.CUDAPlace(0) if with_gpu else fluid.CPUPlace()
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    fluid.io.load_params(executor=exe,
                         dirname=model_params,
                         main_program=double_adv_program)

init_prog(double_adv_program)

# Create an evaluation-mode clone for testing
double_eval_program = double_adv_program.clone(for_test=True)

with fluid.program_guard(double_adv_program):
    label = fluid.layers.data(name="label", shape=[1], dtype='int64')
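    # Ensemble attack loss: weighted sum of the three models' cross-entropy
    # losses, differentiated with respect to the input.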
    Res_loss = fluid.layers.cross_entropy(input=Res_out, label=label)
    Inception_loss = fluid.layers.cross_entropy(input=Inception_out,
                                                label=label)
    Mob_loss = fluid.layers.cross_entropy(input=Mob_out, label=label)
    loss = Res_loss * Res_ratio + Inception_loss * Incep_ratio + Mob_loss * Mob_ratio
    gradients = fluid.backward.gradients(targets=loss, inputs=[input_layer])[0]


######Inference
                                               resample='BILINEAR')

        out_logits = model.net(input=scale_back, class_dim=class_dim)
        out = fluid.layers.softmax(out_logits)
        tmp_loss = fluid.layers.cross_entropy(input=out, label=label)
        loss += tmp_loss

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    # print(fluid.default_startup_program())
    # Load the pretrained model parameters
    fluid.io.load_persistables(exe, pretrained_model)

# Set the BN layer state of adv_program
init_prog(adv_program)

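# Build a separate program that re-declares the input and network for
# evaluation.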
evala_program = fluid.Program()
with fluid.program_guard(evala_program):
    input_layer_eval = fluid.layers.data(name='image',
                                         shape=image_shape,
                                         dtype='float32')
    out_logits_eval = model.net(input=input_layer_eval, class_dim=class_dim)
    out_eval = fluid.layers.softmax(out_logits_eval)

# Create an evaluation-mode clone for testing
eval_program = evala_program.clone(for_test=True)
init_prog(eval_program)

# Define the gradients
with fluid.program_guard(adv_program):