Example #1
    parser.add_argument('--start_index', '-st', default=0, type=int)
    parser.add_argument('--batch_size', default=1000, type=int)
    parser.add_argument('--out_dir', default='../results/tar_adv_img/')
    parser.add_argument('--defense', default=0, type=int)
    args = parser.parse_known_args()[0]
    imgPath = '../results/tar_adv_img/'  # load the original images
    pretrained_path = '../results/adv_border_chk/'
    out_dir = args.out_dir  # save the targeted-attack adversarial examples, with or without the defense
    os.makedirs(out_dir, exist_ok=True)

    # Select the device (the GPU branch is commented out; CPU is used here)
    device = torch.device("cpu")
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    data_loader = ImageNet.get_data_loaders(data_path=imgPath,
                                            batch_size=1,
                                            num_workers=0,
                                            normalize=False)
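    # normalize=False: the loader presumably yields unnormalized pixel tensors,
    # so any ImageNet normalization has to be applied before the classifier.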

    classifier = ImageNet.get_classifier().to(device)
    classifier.eval()
    chk_name = os.listdir(pretrained_path)
    chk_name.sort()  # os.listdir order is arbitrary on Linux, so sort for a deterministic order

    img_names = os.listdir('../results/adv_border_png/')
    img_names.sort()

    cnt = 0
    cnt_effect = 0
    cnt_total = 0
    cnt_temp = 0
    for i, (input, target) in enumerate(data_loader):
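        # NOTE: the body of this loop is not part of the snippet. A minimal,
        # hypothetical continuation: normalize, classify, and update the counters
        # declared above (the real script may instead compare predictions against
        # the attack's target class, and may also load the per-image framing
        # checkpoints listed in chk_name).
        mean = torch.tensor([0.485, 0.456, 0.406], device=device).view(3, 1, 1)
        std = torch.tensor([0.229, 0.224, 0.225], device=device).view(3, 1, 1)
        input, target = input.to(device), target.to(device)
        with torch.no_grad():
            pred = classifier((input - mean) / std).argmax(dim=1)
        cnt_total += target.size(0)
        cnt_effect += (pred == target).sum().item()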
Example #2
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Draw examples of attacked ImageNet examples')
    parser.add_argument('--framing',
                        required=True,
                        help='Path to pretrained framing')
    parser.add_argument('--output',
                        '-o',
                        default='examples.png',
                        help='Output file')
    args = parser.parse_args()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    _, data_loader = ImageNet.get_data_loaders(batch_size=BATCH_SIZE,
                                               num_workers=0,
                                               normalize=False,
                                               shuffle_val=True)

    framing = load_pretrained_imagenet_framing(args.framing).to(device)
    classifier = resnet50(pretrained=True).to(device)
    classifier.eval()

    input, target = next(iter(data_loader))
    input = input.to(device)

    with torch.no_grad():
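        # Apply the pretrained adversarial framing to the batch (the framing
        # attack adds a thin adversarial border around each image).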
        input_att, _ = framing(input, normalize=False)

    normalized_input = input.clone()
    normalized_input_att = input_att.clone()
    for id in range(BATCH_SIZE):
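        # NOTE: the rest of the loop is not shown in this snippet. A hypothetical
        # continuation, assuming the per-image step applies the standard ImageNet
        # per-channel normalization (the loader above was built with
        # normalize=False and resnet50 expects normalized input):
        mean = torch.tensor([0.485, 0.456, 0.406], device=device).view(3, 1, 1)
        std = torch.tensor([0.229, 0.224, 0.225], device=device).view(3, 1, 1)
        normalized_input[id] = (input[id] - mean) / std
        normalized_input_att[id] = (input_att[id] - mean) / std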