Example #1
def boundary_attack(model,
                    images,
                    labels,
                    targeted=False,
                    init=None,
                    max_iters=1000,
                    spherical_step=0.01,
                    source_step=0.01,
                    step_adaptation=1.5,
                    reset_step_every=50,
                    transformation=None,
                    dataset_name='imagenet',
                    blended_noise=False,
                    dct_mode='none',
                    dct_ratio=1.0,
                    repeat_images=1,
                    halve_every=250):

    if transformation is None:
        transformation = lambda x: x

    images = images.cuda()
    labels = labels.cuda()
    batch_size = images.size(0)
    base_preds, _ = utils.get_preds(model,
                                    transformation(images),
                                    dataset_name,
                                    batch_size=batch_size,
                                    return_cpu=False)
    # keep the pre-repeat batch size: the Hyperband halving step below
    # regroups the repeated copies of each image relative to it
    base_batch_size = batch_size
    images = images.repeat(repeat_images, 1, 1, 1)
    labels = labels.repeat(repeat_images)
    if repeat_images > 1:
        # each repeated copy of the batch searches at a different DCT ratio
        # (dct_ratio, 2 * dct_ratio, 4 * dct_ratio, ...)
        multipliers = (torch.ones(repeat_images) * 2).pow(
            torch.arange(0, repeat_images).float())
        dct_ratio = torch.ones(base_batch_size) * dct_ratio
        dct_ratio = (dct_ratio.unsqueeze(0).repeat(repeat_images, 1) *
                     multipliers.unsqueeze(1).repeat(1, base_batch_size)).view(-1)
    batch_size = images.size(0)
    images_vec = images.view(batch_size, -1)
    spherical_step_stats = torch.zeros(batch_size, max_iters)
    source_step_stats = torch.zeros(batch_size, max_iters)
    mse_stats = torch.zeros(batch_size, max_iters)
    distance_stats = torch.zeros(batch_size, max_iters)

    # sample random noise as initialization (unless one was supplied) and
    # keep resampling entries that are still classified as the true label
    if init is None:
        init = torch.zeros(images.size()).cuda()
    else:
        init = init.cuda()
    preds = labels.clone()
    while preds.eq(labels).sum() > 0:
        idx = torch.arange(0, batch_size).long().cuda()[preds.eq(labels)]
        init[idx] = torch.rand(images[idx].size()).cuda()
        preds, _ = utils.get_preds(model,
                                   transformation(init),
                                   dataset_name,
                                   batch_size=batch_size,
                                   return_cpu=False)

    if blended_noise:
        min_alpha = torch.zeros(batch_size).cuda()
        max_alpha = torch.ones(batch_size).cuda()
        # binary search up to precision 2^(-10)
        for _ in range(10):
            alpha = (min_alpha + max_alpha) / 2
            alpha_expanded = alpha.view(batch_size, 1, 1, 1).expand_as(init)
            interp = alpha_expanded * init + (1 - alpha_expanded) * images
            preds, _ = utils.get_preds(model,
                                       transformation(interp),
                                       dataset_name,
                                       batch_size=batch_size,
                                       return_cpu=False)
            if targeted:
                min_alpha[preds.ne(labels)] = alpha[preds.ne(labels)]
                max_alpha[preds.eq(labels)] = alpha[preds.eq(labels)]
            else:
                min_alpha[preds.eq(labels)] = alpha[preds.eq(labels)]
                max_alpha[preds.ne(labels)] = alpha[preds.ne(labels)]
        alpha = max_alpha.view(batch_size, 1, 1, 1).expand_as(init)
        perturbed = alpha * init + (1 - alpha) * images
    else:
        perturbed = init

    # recording success rate of previous moves for adjusting step size
    spherical_succ = torch.zeros(batch_size, reset_step_every).cuda()
    source_succ = torch.zeros(batch_size, reset_step_every).cuda()
    spherical_steps = (torch.ones(batch_size) * spherical_step).cuda()
    source_steps = (torch.ones(batch_size) * source_step).cuda()

    for i in range(max_iters):
        candidates, spherical_candidates = generate_candidate(
            images,
            perturbed,
            spherical_steps,
            source_steps,
            dct_mode=dct_mode,
            dct_ratio=dct_ratio)
        # additional query on the spherical candidate for RGB-BA; in DCT mode
        # the query is skipped and the spherical move is counted as a success
        if dct_mode != 'none':
            spherical_preds = labels + 1
        else:
            spherical_preds, _ = utils.get_preds(
                model,
                transformation(spherical_candidates),
                dataset_name,
                batch_size=batch_size,
                return_cpu=False)
        source_preds, _ = utils.get_preds(model,
                                          transformation(candidates),
                                          dataset_name,
                                          batch_size=batch_size,
                                          return_cpu=False)
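        # record which proposals escaped the true class (a success for the attack)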
        spherical_succ[:, i % reset_step_every][spherical_preds.ne(labels)] = 1
        source_succ[:, i % reset_step_every][source_preds.ne(labels)] = 1
        # reject moves if they result in correctly classified images
        if source_preds.eq(labels).sum() > 0:
            idx = torch.arange(
                0, batch_size).long().cuda()[source_preds.eq(labels)]
            candidates[idx] = perturbed[idx]
        # reject moves if MSE is already low enough
        if i > 0:
            candidates[mse_prev.lt(1e-6)] = perturbed[mse_prev.lt(1e-6)]
        # record some stats
        perturbed_vec = perturbed.view(batch_size, -1)
        candidates_vec = candidates.view(batch_size, -1)
        mse_prev = (images_vec - perturbed_vec).pow(2).mean(1)
        mse = (images_vec - candidates_vec).pow(2).mean(1)
        reduction = 100 * (mse_prev.mean() - mse.mean()) / mse_prev.mean()
        norms = (images_vec - candidates_vec).norm(2, 1)
        print('Iteration %d:  MSE = %.6f (reduced by %.4f%%), L2 norm = %.4f' %
              (i + 1, mse.mean(), reduction, norms.mean()))

        if (i + 1) % reset_step_every == 0:
            # adjust step size
            spherical_steps, source_steps, p_spherical, p_source = adjust_step(
                spherical_succ,
                source_succ,
                spherical_steps,
                source_steps,
                step_adaptation,
                dct_mode=dct_mode)
            spherical_succ.fill_(0)
            source_succ.fill_(0)
            print('Spherical success rate = %.4f, new spherical step = %.4f' %
                  (p_spherical.mean(), spherical_steps.mean()))
            print('Source success rate = %.4f, new source step = %.4f' %
                  (p_source.mean(), source_steps.mean()))

        mse_stats[:, i] = mse
        distance_stats[:, i] = norms
        spherical_step_stats[:, i] = spherical_steps
        source_step_stats[:, i] = source_steps
        perturbed = candidates

        if halve_every > 0 and batch_size > base_batch_size and (
                i + 1) % halve_every == 0:
            # Hyperband-style successive halving: for every image, keep the
            # half of its repeated copies with the lowest MSE
            num_repeats = int(batch_size / base_batch_size)
            perturbed_vec = perturbed.view(batch_size, -1)
            mse = (images_vec - perturbed_vec).pow(2).mean(1).view(
                num_repeats, base_batch_size)
            _, indices = mse.sort(0)
            indices = indices[:int(num_repeats / 2)].cpu()
            idx = torch.arange(0, base_batch_size).unsqueeze(0).repeat(
                int(num_repeats / 2), 1).long()
            idx += indices * base_batch_size
            idx = idx.view(-1)
            batch_size = idx.size(0)
            images = images[idx.cuda()]
            labels = labels[idx.cuda()]
            images_vec = images_vec[idx.cuda()]
            perturbed = perturbed[idx.cuda()]
            spherical_step_stats = spherical_step_stats[idx]
            source_step_stats = source_step_stats[idx]
            mse_stats = mse_stats[idx]
            distance_stats = distance_stats[idx]
            dct_ratio = dct_ratio[idx]
            spherical_steps = spherical_steps[idx.cuda()]
            source_steps = source_steps[idx.cuda()]
            spherical_succ = spherical_succ[idx.cuda()]
            source_succ = source_succ[idx.cuda()]

    return (perturbed.cpu(), mse_stats, distance_stats, spherical_step_stats,
            source_step_stats)
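
A minimal invocation sketch (not part of the original listing): the model and the random batch below are stand-ins, and the call still assumes the repository's utils.get_preds helper and a CUDA device.

import torch
import torchvision

# stand-in model and batch; a real run would use normalized ImageNet images
model = torchvision.models.resnet50(pretrained=True).cuda().eval()
images = torch.rand(4, 3, 224, 224)
labels = torch.randint(0, 1000, (4,))
adv, mse_stats, dist_stats, sph_steps, src_steps = boundary_attack(
    model, images, labels,
    max_iters=200,       # short run for a smoke test
    dct_mode='none')     # plain RGB boundary attack
print('final mean MSE: %.6f' % mse_stats[:, -1].mean().item())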
Example #2
def inference_on_single_labelled_image_pca(
    query_img_file,
    labels_dir,
    img_dir,
    img_fts_dir,
    weights_file,
    top_k=1000,
    plot=True,
):
    """
    Computes the average precision for a given query image and optionally
    plots the top 20 results

    Args:
        query_img_file  : path of query image file
        labels_dir  : Directory for ground truth labels
        img_dir     : Directory holding the images
        img_fts_dir : Directory holding the pca reduced features generated through create_db.py script
        weights_file: path of trained weights file
        top_k       : number of top-ranked results used to calculate the average precision
        plot        : if True, top 20 results are plotted

    Returns:
        Average precision for the query image file
    """
    # Create cuda parameters
    use_cuda = torch.cuda.is_available()
    np.random.seed(2019)
    torch.manual_seed(2019)
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Available device = ", device)

    # Create embedding network
    resnet_model = create_embedding_net()
    model = TripletNet(resnet_model)
    model.load_state_dict(torch.load(weights_file, map_location=device))
    model.to(device)
    model.eval()

    # Get query name
    query_img_name = os.path.basename(query_img_file)
    query_img_path = os.path.join(img_dir, query_img_name)

    # Create Query extractor object
    QUERY_EXTRACTOR = QueryExtractor(labels_dir, img_dir, subset="inference")

    # Create query ground truth dictionary
    query_gt_dict = QUERY_EXTRACTOR.get_query_map()[query_img_name]

    # Create image database: feature files and the corresponding image paths
    QUERY_IMAGES_FTS = [
        os.path.join(img_fts_dir, file)
        for file in sorted(os.listdir(img_fts_dir))
    ]
    QUERY_IMAGES = [
        os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))
    ]

    # Compute the query embedding and reduce it with PCA
    query_fts = get_query_embedding(model, device,
                                    query_img_file).detach().cpu().numpy()
    query_fts = perform_pca_on_single_vector(query_fts)

    # Create similarity list
    similarity = []
    for file in tqdm(QUERY_IMAGES_FTS):
        file_fts = np.squeeze(np.load(file))
        cos_sim = np.dot(query_fts, file_fts) / (np.linalg.norm(query_fts) *
                                                 np.linalg.norm(file_fts))
        similarity.append(cos_sim)

    # Get best matches using similarity
    similarity = np.asarray(similarity)
    indexes = (-similarity).argsort()[:top_k]
    best_matches = [QUERY_IMAGES[index] for index in indexes]

    # Get preds
    if plot:
        preds = get_preds_and_visualize(best_matches, query_gt_dict, img_dir,
                                        20)
    else:
        preds = get_preds(best_matches, query_gt_dict)

    # Get average precision
    ap = ap_per_query(best_matches, query_gt_dict)

    return ap
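
A hypothetical call for reference; the directory layout and file names are placeholders for an Oxford-buildings-style dataset, and create_db.py is assumed to have already written the PCA-reduced features into img_fts_dir.

ap = inference_on_single_labelled_image_pca(
    query_img_file="data/oxbuild/images/all_souls_000013.jpg",  # placeholder
    labels_dir="data/oxbuild/gt_files/",
    img_dir="data/oxbuild/images/",
    img_fts_dir="fts_pca/oxbuild/",
    weights_file="weights/triplet_model.pth",  # placeholder
    top_k=500,
    plot=False,
)
print("Average precision: %.4f" % ap)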
Example #3
batchfile = '%s/images_%s_%d.pth' % (args.sampled_image_dir, args.model,
                                     args.num_runs)
if os.path.isfile(batchfile):
    checkpoint = torch.load(batchfile)
    images = checkpoint['images']
    labels = checkpoint['labels']
else:
    images = torch.zeros(args.num_runs, 3, image_size, image_size)
    labels = torch.zeros(args.num_runs).long()
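    # start with preds != labels so the sampling loop below runs at least once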
    preds = labels + 1
    while preds.ne(labels).sum() > 0:
        idx = torch.arange(0, images.size(0)).long()[preds.ne(labels)]
        for i in list(idx):
            images[i], labels[i] = testset[random.randint(0, len(testset) - 1)]
        preds[idx], _ = utils.get_preds(model,
                                        images[idx],
                                        'imagenet',
                                        batch_size=args.batch_size)
    torch.save({'images': images, 'labels': labels}, batchfile)

if args.order == 'rand':
    n_dims = 3 * args.freq_dims * args.freq_dims
else:
    n_dims = 3 * image_size * image_size
if args.num_iters > 0:
    max_iters = int(min(n_dims, args.num_iters))
else:
    max_iters = int(n_dims)
N = int(math.floor(float(args.num_runs) / float(args.batch_size)))
for i in range(N):
    upper = min((i + 1) * args.batch_size, args.num_runs)
    images_batch = images[(i * args.batch_size):upper]
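
The listing breaks off inside the batch loop, so no continuation is reproduced here. The standalone sketch below only illustrates the floor-based batching arithmetic used above, including its one pitfall: a trailing partial batch is silently dropped.

import math

num_runs, batch_size = 10, 4
N = int(math.floor(float(num_runs) / float(batch_size)))  # 2 full batches
for i in range(N):
    upper = min((i + 1) * batch_size, num_runs)
    print(i * batch_size, upper)  # (0, 4), (4, 8); runs 8 and 9 are never used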