def main(image_path):
    alexnet = mx.gluon.model_zoo.vision.alexnet(pretrained=True)
    # print(alexnet)

    orig = cv2.imread(image_path)[..., ::-1]
    orig = cv2.resize(orig, (224, 224))
    img = orig.copy().astype(np.float32)

    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    img /= 255.0
    img = old_div((img - mean), std)
    img = img.transpose(2, 0, 1)

    img = np.expand_dims(img, axis=0)

    #array = mx.nd.array(img)

    # advbox demo
    m = MxNetModel(alexnet, None, (-1, 1), channel_axis=1)

    attack = FGSMT(m)
    #attack = FGSM(m)

    # static epsilons
    attack_config = {"epsilons": 0.2, "epsilon_steps": 1, "steps": 100}

    inputs = img
    #labels=388
    labels = None

    print(inputs.shape)

    adversary = Adversary(inputs, labels)
    #adversary = Adversary(inputs, 388)

    tlabel = 538
    adversary.set_target(is_targeted_attack=True, target_label=tlabel)

    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        adv = adversary.adversarial_example[0]
        adv = adv.transpose(1, 2, 0)
        adv = (adv * std) + mean
        adv = adv * 255.0
        adv = adv[..., ::-1]  # RGB to BGR
        adv = np.clip(adv, 0, 255).astype(np.uint8)
        cv2.imwrite('img_adv.png', adv)
    else:
        print('attack failed')

    print("fgsm attack done")
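# A minimal sanity-check sketch (an addition, not part of the original demo):
# run the normalized NCHW array through the same pretrained Gluon AlexNet to
# see the clean top-1 ImageNet label before attacking.
import mxnet as mx
import numpy as np

def clean_label(alexnet, img_nchw):
    # img_nchw: float32 array of shape (1, 3, 224, 224), already normalized
    out = alexnet(mx.nd.array(img_nchw))
    return int(out.argmax(axis=1).asscalar())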
def main(use_cuda):
    """
    Advbox example which demonstrates how to use Advbox.
    """
    # base macros
    TOTAL_NUM = 100
    IMG_NAME = 'image'
    LABEL_NAME = 'label'

    # parse args
    args = parser.parse_args()
    print_arguments(args)

    # parameters from arguments
    class_dim = args.class_dim
    model_name = args.model
    target_class = args.target
    pretrained_model = args.pretrained_model
    image_shape = [int(m) for m in args.image_shape.split(",")]

    if args.log_debug:
        logging.getLogger().setLevel(logging.INFO)

    assert model_name in model_list, "{} is not in lists: {}".format(
        args.model, model_list)

    # model definition
    model = models.__dict__[model_name]()

    # declare vars
    image = fluid.layers.data(
        name=IMG_NAME, shape=image_shape, dtype='float32')
    logits = model.net(input=image, class_dim=class_dim)

    # clone program and graph for inference
    infer_program = fluid.default_main_program().clone(for_test=True)

    image.stop_gradient = False
    label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=logits, label=label)
    avg_cost = fluid.layers.mean(x=cost)

    BATCH_SIZE = 1
    test_reader = paddle.batch(
        reader.test(TEST_LIST, DATA_PATH), batch_size=BATCH_SIZE)

    # set up the run environment
    enable_gpu = use_cuda and args.use_gpu
    place = fluid.CUDAPlace(0) if enable_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # advbox demo
    m = PaddleModel(
        fluid.default_main_program(),
        IMG_NAME,
        LABEL_NAME,
        logits.name,
        avg_cost.name, (0, 1),
        channel_axis=3)

    # Adversarial method: CW
    attack = CW_L2(
        m,
        learning_rate=0.1,
        attack_model=model.conv_net,
        with_gpu=enable_gpu,
        shape=image_shape,
        dim=class_dim,
        confidence_level=0.9,
        multi_clip=True)
    attack_config = {
        "attack_iterations": 50,
        "c_search_step": 10,
        "c_range": (0.01, 100),
        "c_start": 10,
        "targeted": True
    }

    # reload model vars
    if pretrained_model:

        def if_exist(var):
            return os.path.exists(os.path.join(pretrained_model, var.name))

        fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)

    # inference
    pred_label = infer(infer_program, image, logits, place, exe)

    # if inference only, exit here
    if args.inference:
        exit(0)

    print("--------------------adversary-------------------")
    # use test data to generate adversarial examples
    total_count = 0
    fooling_count = 0

    for data in test_reader():
        total_count += 1
        data_img = [data[0][0]]
        filename = data[0][1]
        org_data = data_img[0][0]
        adversary = Adversary(org_data, pred_label[filename])

        # targeted attack
        if target_class != -1:
            tlabel = target_class
            adversary.set_target(is_targeted_attack=True, target_label=tlabel)

        adversary = attack(adversary, **attack_config)

        if adversary.is_successful():
            fooling_count += 1
            print(
                'attack success, original_label=%d, adversarial_label=%d, count=%d'
                % (pred_label[filename], adversary.adversarial_label,
                   total_count))

            # output the original image, the adversarial image and their difference
            generation_image(total_count, org_data, pred_label[filename],
                             adversary.adversarial_example,
                             adversary.adversarial_label, "CW")
        else:
            print('attack failed, original_label=%d, count=%d' %
                  (pred_label[filename], total_count))

        if total_count >= TOTAL_NUM:
            print(
                "[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
                % (fooling_count, total_count,
                   float(fooling_count) / total_count))
            break
    print("cw attack done")
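# The CW demo above assumes a module-level argparse parser. A minimal sketch of
# the flags it actually reads; the defaults shown here are illustrative
# assumptions, not values taken from the original script.
import argparse

parser = argparse.ArgumentParser(description="CW attack demo arguments")
parser.add_argument("--class_dim", type=int, default=1000)        # number of classes
parser.add_argument("--model", type=str, default="ResNet50")      # must appear in model_list
parser.add_argument("--target", type=int, default=-1)             # -1 means non-targeted
parser.add_argument("--pretrained_model", type=str, default="")   # dir with saved vars
parser.add_argument("--image_shape", type=str, default="3,224,224")
parser.add_argument("--log_debug", action="store_true")
parser.add_argument("--use_gpu", action="store_true")
parser.add_argument("--inference", action="store_true")           # run inference only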
def main(): """ Advbox demo which demonstrate how to use advbox. """ TOTAL_NUM = 500 IMG_NAME = 'img' LABEL_NAME = 'label' img = fluid.layers.data(name=IMG_NAME, shape=[1, 28, 28], dtype='float32') # gradient should flow img.stop_gradient = False label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64') logits = mnist_cnn_model(img) cost = fluid.layers.cross_entropy(input=logits, label=label) avg_cost = fluid.layers.mean(x=cost) # use CPU place = fluid.CPUPlace() # use GPU # place = fluid.CUDAPlace(0) exe = fluid.Executor(place) BATCH_SIZE = 1 train_reader = paddle.batch(paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=128 * 10), batch_size=BATCH_SIZE) test_reader = paddle.batch(paddle.reader.shuffle( paddle.dataset.mnist.test(), buf_size=128 * 10), batch_size=BATCH_SIZE) fluid.io.load_params(exe, "./mnist/", main_program=fluid.default_main_program()) # advbox demo m = PaddleModel(fluid.default_main_program(), IMG_NAME, LABEL_NAME, logits.name, avg_cost.name, (-1, 1), channel_axis=1) attack = ILCM(m) attack_config = {"epsilons": 0.1, "steps": 100} # use train data to generate adversarial examples total_count = 0 fooling_count = 0 for data in train_reader(): total_count += 1 adversary = Adversary(data[0][0], data[0][1]) tlabel = 0 adversary.set_target(is_targeted_attack=True, target_label=tlabel) # ILCM targeted attack adversary = attack(adversary, **attack_config) if adversary.is_successful(): fooling_count += 1 print( 'attack success, original_label=%d, adversarial_label=%d, count=%d' % (data[0][1], adversary.adversarial_label, total_count)) # plt.imshow(adversary.target, cmap='Greys_r') # plt.show() # np.save('adv_img', adversary.target) else: print('attack failed, original_label=%d, count=%d' % (data[0][1], total_count)) if total_count >= TOTAL_NUM: print( "[TRAIN_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f" % (fooling_count, total_count, float(fooling_count) / total_count)) break # use test data to generate adversarial examples total_count = 0 fooling_count = 0 for data in test_reader(): total_count += 1 adversary = Adversary(data[0][0], data[0][1]) tlabel = 0 adversary.set_target(is_targeted_attack=True, target_label=tlabel) # ILCM targeted attack adversary = attack(adversary, **attack_config) if adversary.is_successful(): fooling_count += 1 print( 'attack success, original_label=%d, adversarial_label=%d, count=%d' % (data[0][1], adversary.adversarial_label, total_count)) # plt.imshow(adversary.target, cmap='Greys_r') # plt.show() # np.save('adv_img', adversary.target) else: print('attack failed, original_label=%d, count=%d' % (data[0][1], total_count)) if total_count >= TOTAL_NUM: print( "[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f" % (fooling_count, total_count, float(fooling_count) / total_count)) break print("ilcm attack done")
def main(dirname, imagename):
    # Load the already-decoded image. This is a major pitfall: the pretrained
    # ImageNet .pb model that TensorFlow ships includes the image preprocessing
    # stage (JPEG decoding), which has no gradient, so we have to feed the
    # decoded data directly.
    image = None
    with tf.gfile.Open(imagename, 'rb') as f:
        image = np.array(Image.open(f).convert('RGB')).astype(np.float)
    image = [image]

    session = tf.Session()

    def create_graph(dirname):
        with tf.gfile.FastGFile(dirname, 'rb') as f:
            graph_def = session.graph_def
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name='')

    create_graph(dirname)

    # Initialize the variables -- this step is essential.
    session.run(tf.global_variables_initializer())

    tensorlist = [n.name for n in session.graph_def.node]
    logger.info(tensorlist)

    # fetch the logits tensor
    logits = session.graph.get_tensor_by_name('softmax/logits:0')
    x = session.graph.get_tensor_by_name('ExpandDims:0')
    #y = tf.placeholder(tf.int64, None, name='label')

    # advbox demo
    # The raw data is not normalized, so bounds=(0, 255).
    m = TensorflowModel(
        session, x, None, logits, None, bounds=(0, 255),
        channel_axis=3, preprocess=None)

    attack = DeepFoolAttack(m)
    attack_config = {"iterations": 100, "overshoot": 0.02}

    # y is left as None, so the original label is computed automatically.
    adversary = Adversary(image, None)

    # DeepFool non-targeted attack
    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)
        #print(adversary_image - image)

        # cast the dtype: the array was float, convert it to uint8
        adversary_image = np.array(adversary_image).astype("uint8").reshape(
            [100, 100, 3])
        logging.info(adversary_image - image)
        #print(adversary_image - image)
        im = Image.fromarray(adversary_image)
        im.save("adversary_image_nontarget.jpg")

    print("DeepFool non-target attack done")

    attack = DeepFoolAttack(m)
    attack_config = {"iterations": 100, "overshoot": 0.05}

    adversary = Adversary(image, None)

    # target class 651: microphone
    tlabel = 651
    adversary.set_target(is_targeted_attack=True, target_label=tlabel)

    # DeepFool targeted attack
    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)

        # cast the dtype: the array was float, convert it to uint8
        logging.info(adversary_image - image)
        adversary_image = np.array(adversary_image).astype("uint8").reshape(
            [100, 100, 3])
        im = Image.fromarray(adversary_image)
        im.save("adversary_image_target.jpg")

    print("DeepFool target attack done")
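# A small sanity-check sketch (an addition, not part of the original demo):
# feed the decoded image through the imported graph to print the clean top-1
# label, using the same 'ExpandDims:0' input and 'softmax/logits:0' output
# tensors as above.
def clean_label(session, x, logits, image):
    # image: a list holding one HWC float array, exactly as prepared above
    pred = session.run(logits, feed_dict={x: image})
    return int(np.argmax(pred))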
def main(modulename, imagename):
    '''
    Keras' applications module provides models with pretrained weights that can
    be used for prediction, feature extraction and fine-tuning.
    The pretrained weights are downloaded to ~/.keras/models/ and are loaded
    automatically when the model is instantiated.
    '''
    # switch to test (inference) mode
    keras.backend.set_learning_phase(0)

    model = ResNet50(weights=modulename)
    #model = InceptionV3(weights=modulename)

    logging.info(model.summary())

    img = image.load_img(imagename, target_size=(224, 224))
    raw_imagedata = image.img_to_array(img)
    raw_imagedata = np.expand_dims(raw_imagedata, axis=0)

    # 'RGB' -> 'BGR'
    imagedata = raw_imagedata[:, :, :, ::-1]

    #logging.info(raw_imagedata)
    #logging.info(imagedata)

    # logits: the fc1000 layer
    logits = model.get_layer('fc1000').output
    # In Keras a specific layer can be fetched with:
    # base_model.get_layer('block4_pool').output

    # advbox demo
    # The raw data is not normalized, so bounds=(0, 255); KerasModel performs
    # the preprocessing internally when predicting and computing gradients.
    # For ImageNet the normalization uses std 1 and mean [104, 116, 123].
    # featurefqueezing_bit_depth is the bit depth used by the feature-squeezing
    # defense; it improves the quality of the generated adversarial examples,
    # and 8 bits is usually enough.
    m = KerasModel(
        model,
        model.input,
        None,
        logits,
        None,
        bounds=(0, 255.0),
        channel_axis=3,
        preprocess=([104, 116, 123], 1),
        featurefqueezing_bit_depth=8)

    attack = FGSM(m)

    # When setting epsilons, the feature range does not need to be taken into
    # account; the implementation already handles it, and epsilons lies in (0, 1).
    # epsilon can be adjusted dynamically: epsilon_steps is the number of epsilon
    # values to try, epsilons is the lower bound and epsilons_max the upper bound.
    #attack_config = {"epsilons": 0.3, "epsilons_max": 0.5, "epsilon_steps": 100}

    # static epsilons
    attack_config = {"epsilons": 1, "epsilons_max": 10, "epsilon_steps": 1, "steps": 100}

    # y is left as None, so the original label is computed automatically.
    adversary = Adversary(imagedata.copy(), None)

    # FGSM non-targeted attack
    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)

        logging.info("adversary_image label={0} ".format(
            np.argmax(m.predict(adversary_image))))
        #logging.info(adversary_image)

        # cast the dtype: the array was float, convert it to uint8
        adversary_image = np.array(adversary_image).astype("uint8").reshape(
            [224, 224, 3])
        #logging.info(adversary_image)

        # 'BGR' -> 'RGB'
        adversary_image = adversary_image[:, :, ::-1]

        logging.info(adversary_image - raw_imagedata)
        img = array_to_img(adversary_image)
        img.save('adversary_image_nontarget.jpg')

    print("fgsm non-target attack done")

    attack = FGSMT(m)

    # static epsilons
    attack_config = {"epsilons": 20, "epsilons_max": 20, "epsilon_steps": 1, "steps": 100}

    adversary = Adversary(imagedata, None)

    tlabel = 489
    adversary.set_target(is_targeted_attack=True, target_label=tlabel)

    # FGSM targeted attack
    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)

        # cast the dtype: the array was float, convert it to uint8
        adversary_image = np.array(adversary_image).astype("uint8").reshape(
            [224, 224, 3])

        # 'BGR' -> 'RGB'
        adversary_image = adversary_image[:, :, ::-1]

        logging.info(adversary_image - raw_imagedata)
        img = array_to_img(adversary_image)
        img.save('adversary_image_target.jpg')

    print("fgsm target attack done")
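# A hedged side-check (an addition, not in the original demo): Keras' own
# ResNet50 helpers can print the clean top-3 prediction for comparison with the
# adversarial label; preprocess_input applies the same BGR/mean handling.
from keras.applications.resnet50 import preprocess_input, decode_predictions

def print_clean_topk(model, raw_imagedata, k=3):
    # raw_imagedata: the (1, 224, 224, 3) RGB float array built above
    preds = model.predict(preprocess_input(raw_imagedata.copy()))
    print(decode_predictions(preds, top=k))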
def main(modulename, imagename):
    '''
    Keras' applications module provides models with pretrained weights that can
    be used for prediction, feature extraction and fine-tuning.
    The pretrained weights are downloaded to ~/.keras/models/ and are loaded
    automatically when the model is instantiated.
    '''
    # switch to test (inference) mode
    keras.backend.set_learning_phase(0)

    model = ResNet50(weights=modulename)

    logging.info(model.summary())

    img = image.load_img(imagename, target_size=(224, 224))
    imagedata = image.img_to_array(img)
    #imagedata = imagedata[:, :, ::-1]
    imagedata = np.expand_dims(imagedata, axis=0)

    # logits: the fc1000 layer
    logits = model.get_layer('fc1000').output
    # In Keras a specific layer can be fetched with:
    # base_model.get_layer('block4_pool').output

    # advbox demo
    # The raw data is not normalized, so bounds=(0, 255); KerasModel performs
    # the preprocessing internally when predicting and computing gradients.
    # For ImageNet the normalization uses std 1 and mean [104, 116, 123].
    m = KerasModel(
        model,
        model.input,
        None,
        logits,
        None,
        bounds=(0, 255),
        channel_axis=3,
        preprocess=([104, 116, 123], 1),
        featurefqueezing_bit_depth=8)

    attack = DeepFoolAttack(m)
    attack_config = {"iterations": 100, "overshoot": 10}

    # y is left as None, so the original label is computed automatically.
    adversary = Adversary(imagedata[:, :, ::-1], None)

    # DeepFool non-targeted attack
    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)

        # cast the dtype: the array was float, convert it to uint8
        # ::-1 reverses the color channels, because Keras ResNet50 expects BGR
        # instead of RGB
        adversary_image = adversary_image[:, :, ::-1]
        adversary_image = np.array(adversary_image).astype("uint8").reshape(
            [224, 224, 3])
        logging.info(adversary_image - imagedata)
        img = array_to_img(adversary_image)
        img.save('adversary_image_nontarget.jpg')

    print("deepfool non-target attack done")

    attack = DeepFoolAttack(m)
    attack_config = {"iterations": 100, "overshoot": 10}

    adversary = Adversary(imagedata[:, :, ::-1], None)

    tlabel = 489
    adversary.set_target(is_targeted_attack=True, target_label=tlabel)

    # DeepFool targeted attack
    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        # the adversarial example is stored in adversary.adversarial_example
        adversary_image = np.copy(adversary.adversarial_example)

        # cast the dtype: the array was float, convert it to uint8
        # ::-1 reverses the color channels, because Keras ResNet50 expects BGR
        # instead of RGB
        adversary_image = adversary_image[:, :, ::-1]
        adversary_image = np.array(adversary_image).astype("uint8").reshape(
            [224, 224, 3])
        logging.info(adversary_image - imagedata)
        img = array_to_img(adversary_image)
        img.save('adversary_image_target.jpg')

    print("deepfool target attack done")
def main(image_path):
    # Define what device we are using
    logging.info("CUDA Available: {}".format(torch.cuda.is_available()))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    orig = cv2.imread(image_path)[..., ::-1]
    orig = cv2.resize(orig, (224, 224))
    img = orig.copy().astype(np.float32)

    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    img /= 255.0
    img = old_div((img - mean), std)
    img = img.transpose(2, 0, 1)

    img = Variable(
        torch.from_numpy(img).to(device).float().unsqueeze(0)).cpu().numpy()

    # Initialize the network
    # AlexNet
    model = models.alexnet(pretrained=True).to(device).eval()
    #model = models.resnet18(pretrained=True).to(device).eval()
    #print(model)

    # Do not track gradients for the parameters, so they cannot be modified.
    for param in model.parameters():
        #print(param)
        #print(param.requires_grad)
        param.requires_grad = False

    #loss_func = nn.CrossEntropyLoss()

    # advbox demo
    m = PytorchModel(model, None, (-1, 1), channel_axis=1)

    attack = FGSMT(m)
    #attack = FGSM(m)

    # static epsilons
    attack_config = {"epsilons": 0.2, "epsilon_steps": 1, "steps": 100}

    inputs = img
    #labels=388
    labels = None

    print(inputs.shape)

    adversary = Adversary(inputs, labels)
    #adversary = Adversary(inputs, 388)

    tlabel = 538
    adversary.set_target(is_targeted_attack=True, target_label=tlabel)

    adversary = attack(adversary, **attack_config)

    if adversary.is_successful():
        print('attack success, adversarial_label=%d' %
              adversary.adversarial_label)

        adv = adversary.adversarial_example[0]
        adv = adv.transpose(1, 2, 0)
        adv = (adv * std) + mean
        adv = adv * 255.0
        adv = adv[..., ::-1]  # RGB to BGR
        adv = np.clip(adv, 0, 255).astype(np.uint8)
        cv2.imwrite('img_adv.png', adv)
    else:
        print('attack failed')

    print("fgsm attack done")
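# A minimal verification sketch (an addition, not part of the original demo):
# push the adversarial example back through the same AlexNet to confirm the
# reported adversarial label.
def adversarial_label(model, device, adversarial_example):
    # adversarial_example: the (1, 3, 224, 224) normalized array from Advbox
    with torch.no_grad():
        out = model(torch.from_numpy(adversarial_example).float().to(device))
    return int(out.argmax(dim=1).item())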