Example #1
def weighted_subtree_triplet_ebp(topk=1, mask='nose'):
    """Weighted subtree excitation backprop, montage visualization of sorted layers"""

    print('[test_whitebox.weighted_subtree_triplet_ebp]: Detection and encoding for (mate, non-mate, probe) triplet')
    wbnet = WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth'))
    wb = Whitebox(wbnet, ebp_version=5)
    (x_mate, x_nonmate, img_probe, img_probe_display) = _encode_triplet_test_cases(wb, mask=mask)
    wb.net.set_triplet_classifier((1.0/2500.0)*x_mate, (1.0/2500.0)*x_nonmate)  # rescale for softmax

    print('[test_whitebox.weighted_subtree_triplet_ebp]: topk=%d weighted subtree for ground truth mask region "%s"' % (topk, mask))
    (img_subtree, P_img, P_subtree, k_subtree) = wb.weighted_subtree_ebp(img_probe, k_poschannel=0, k_negchannel=1, topk=topk, do_max_subtree=False, subtree_mode='all', do_mated_similarity_gating=True)
    print('[test_whitebox.weighted_subtree_triplet_ebp]: weighted subtree EBP, selected layers=%s, P=%s' % (str(k_subtree), str(P_subtree)))

    imlist = []
    P_subtree.append(1.0)  # P_subtree holds the per-layer scale factors; use 1.0 for the final combined subtree map
    P_img.append(img_subtree)
    for (k, (p, img_saliency)) in enumerate(zip(P_subtree, P_img)):
        outfile = os.path.join(tempfile.gettempdir(), '%s.jpg' % uuid.uuid1().hex)
        #alpha_composite = _blend_saliency_map(img_probe_display, np.float32(img_saliency)/255.0, scale_factor=1.0/(p+1E-12)).save(outfile, 'JPEG', quality=95)  # EBP scale factor for display 
        alpha_composite = _blend_saliency_map(img_probe_display, np.float32(img_saliency)/255.0, scale_factor=1.0).save(outfile, 'JPEG', quality=95)    # uniform scale factor for display
        imlist.append(vipy.image.ImageDetection(filename=outfile, xmin=0, ymin=0, width=112, height=112).rgb())

    f_montage = './test_whitebox_weighted_subtree_ebp_topk_%d_mask_%s.jpg' % (topk, mask)
    im_montage = vipy.visualize.montage(imlist, imgheight=112, imgwidth=112, skip=False, border=1)
    vipy.util.imwrite(im_montage.numpy(), f_montage)
    print('[test_whitebox.weighted_subtree_triplet_ebp]: Saving montage (rowwise subtree, sorted by increasing gradient weight) to "%s"' % f_montage)
    print('[test_whitebox.weighted_subtree_triplet_ebp]: Final image in montage (bottom right) is weighted subtree saliency map')
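The _blend_saliency_map helper called throughout these examples is not included in this listing. Below is a minimal sketch of one possible implementation, assuming the display image is a PIL image and the saliency map is a float array in [0, 1] with the same spatial size (both are assumptions; the original helper may normalize differently). It follows the same alpha-channel blending over a white background that layerwise_ebp() below performs inline.

def _blend_saliency_map(img_display, img_saliency, scale_factor=1.0):
    # Sketch only, not the original helper: use the (rescaled) saliency map as an alpha
    # channel and composite the display image over a white background.
    rgb = np.array(img_display.convert('RGB'))
    alpha = np.uint8(255.0 * np.clip(scale_factor * np.float32(img_saliency), 0.0, 1.0))
    rgba = PIL.Image.fromarray(np.concatenate((rgb, np.expand_dims(alpha, 2)), axis=2))
    bg = PIL.Image.new('RGBA', rgba.size, (255, 255, 255))
    return PIL.Image.alpha_composite(bg, rgba.convert('RGBA')).convert('RGB')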
Example #2
def contrastive_ebp():
    """Contrastive excitation backprop"""
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    x_probe = wb.net.preprocess(PIL.Image.open('../data/demo_face.jpg'))
    img_saliency = wb.contrastive_ebp(x_probe, k_poschannel=0, k_negchannel=100)
    outfile = './test_whitebox_contrastive_ebp.jpg'
    _blend_saliency_map(PIL.Image.open('../data/demo_face.jpg').resize((img_saliency.shape[1], img_saliency.shape[0])), img_saliency).save(outfile)  # PIL resize expects (width, height)
    print('[test_whitebox.contrastive_ebp]: saving saliency map blended overlay to "%s"' % outfile)
Example #3
def truncated_contrastive_triplet_ebp():
    """Truncated contrastive triplet excitation backprop"""
    print('[test_whitebox.truncated_contrastive_triplet_ebp]: Detection and encoding for (mate, non-mate, probe) triplet')
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    (x_mate, x_nonmate, img_probe, img_probe_display) = _encode_triplet_test_cases(wb, 'nose')
    wb.net.set_triplet_classifier((1.0/2500.0)*x_mate, (1.0/2500.0)*x_nonmate)  # rescale encodings to avoid softmax overflow
    img_saliency = wb.truncated_contrastive_ebp(img_probe, k_poschannel=0, k_negchannel=1, percentile=20)
    outfile = './test_whitebox_truncated_contrastive_triplet_ebp.jpg'
    _blend_saliency_map(img_probe_display, img_saliency).save(outfile)
    print('[test_whitebox.truncated_contrastive_triplet_ebp]: saving saliency map blended overlay to "%s"' % outfile)
Example #4
def figure1():
    """16x16 frontal mates, frontal non-mates, any probe, resnet-101 whitebox"""

    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    if not os.path.exists('_vggface2_topk_frontal_nonmates.pkl'):
        _vggface2_topk_frontal_nonmates(wb, topk=32)  # recompute once
    n_subjects = 16
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects)    

    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    probelist_clean = copy.deepcopy(probelist)

    # Figure 1a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1a_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1a = copy.deepcopy(probelist)
   
    # Figure 1b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1b = copy.deepcopy(probelist)

    # Figure 1c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1c = copy.deepcopy(probelist)

    # Figure 1d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1d = copy.deepcopy(probelist)

    # Figure 1e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1e = copy.deepcopy(probelist)

    # Figure 1f
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_1a[0]] + [probelist_1b[0]] + [probelist_1c[0]] + [probelist_1d[0]] + [probelist_1e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
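Figures 1a through 1e above (and 2a through 2e in figure2 below) repeat the same montage call, varying only the saliency function. An equivalent, more compact form of that block inside figure1(), keeping the same outputs but collecting the per-figure probe lists in a dictionary instead of the probelist_1a..probelist_1e variables, might look like:

    probelist_by_figure = {}
    for (label, f_saliency) in [('a', None),
                                ('b', lambda im: f_saliency_whitebox_ebp(wb, im)),
                                ('c', lambda im: f_saliency_whitebox_cebp(wb, im)),
                                ('d', lambda im: f_saliency_whitebox_tcebp(wb, im)),
                                ('e', lambda im: f_saliency_whitebox_weighted_subtree(wb, im))]:
        probelist = copy.deepcopy(probelist_clean)
        f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1%s_%d.jpg' % (label, n_subjects), f_saliency=f_saliency)
        print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
        probelist_by_figure[label] = copy.deepcopy(probelist)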
Example #5
def triplet_ebp():
    """Triplet excitation backprop"""
    print('[test_whitebox.triplet_ebp]: Detection and encoding for (mate, non-mate, probe) triplet')
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    (x_mate, x_nonmate, img_probe, img_probe_display) = _encode_triplet_test_cases(wb, 'nose')
    wb.net.set_triplet_classifier((1.0/2500.0)*x_mate, (1.0/2500.0)*x_nonmate)  # rescale encodings to avoid softmax overflow
    P = torch.zeros((1, wb.net.num_classes()))
    P[0][0] = 1.0  # one-hot prior probability for the mate channel
    img_saliency = wb.ebp(img_probe, P)
    outfile = './test_whitebox_triplet_ebp.jpg'
    _blend_saliency_map(img_probe_display, img_saliency).save(outfile)
    print('[test_whitebox.triplet_ebp]: saving saliency map blended overlay to "%s"' % outfile)
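Since set_triplet_classifier places the mate encoding on channel 0 and the non-mate encoding on channel 1 (as the k_poschannel=0, k_negchannel=1 calls above confirm), the same call with the one-hot prior moved to channel 1 yields the non-mated saliency map. A minimal variation of the body above:

    # Variation (sketch): place the one-hot prior on the non-mate channel instead
    P_nonmate = torch.zeros((1, wb.net.num_classes()))
    P_nonmate[0][1] = 1.0
    img_saliency_nonmate = wb.ebp(img_probe, P_nonmate)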
Example #6
def figure2():
    """One mate, top-k nonmates, row-wise by approach"""
    n_subjects = 10
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    if not os.path.exists('_vggface2_topk_nonmates.pkl'):
        _vggface2_topk_nonmates(wb, topk=32)  # recompute once
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_topk_probe_frontalpose()

    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    probelist_clean = copy.deepcopy(probelist)

    # Figure 2a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2a_%d.jpg' % n_subjects, f_saliency=None)
    probelist_2a = copy.deepcopy(probelist)
    print('[eccv20.figure2a]: Saving montage to "%s"' % f_montage)

    # Figure 2b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2b = copy.deepcopy(probelist)

    # Figure 2c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2c = copy.deepcopy(probelist)

    # Figure 2d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2d = copy.deepcopy(probelist)

    # Figure 2e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2e = copy.deepcopy(probelist)

    # Figure 2f
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_2a[0]] + [probelist_2b[0]] + [probelist_2c[0]] + [probelist_2d[0]] + [probelist_2e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure2]: Saving montage to "%s"' % f_montage)
Example #7
def layerwise_ebp():
    """EBP alpha transparency montage starting from each interior layer and selected node specified by argmax excitation at this layer"""

    wb = Whitebox(WhiteboxSTResnet(
        stresnet101('../models/resnet101v4_28NOV17_train.pth')),
                  ebp_version=5,
                  ebp_subtree_mode='all')
    im_probe = PIL.Image.open('../data/demo_face.jpg')
    img_display = np.array(im_probe.resize((112, 112)))
    x_probe = wb.net.preprocess(im_probe)

    imlist = []
    layers = wb._layers()
    for k in range(0, len(layers)):
        print('[test_layerwise_ebp][%d/%d]: layerwise EBP "%s"' %
              (k, len(layers), layers[k]))
        img_saliency = wb.layerwise_ebp(x_probe,
                                        k_poschannel=0,
                                        k_layer=k,
                                        mode='argmax',
                                        mwp=False)
        outfile = os.path.join(tempfile.gettempdir(), '%s.jpg' %
                               uuid.uuid1().hex)  # tempfile for montage
        img = PIL.Image.fromarray(
            np.concatenate(
                (img_display,
                 np.expand_dims(np.minimum(255, 10 + img_saliency), 2)),
                axis=2)).convert('RGBA')
        bg = PIL.Image.new('RGBA', img.size, (255, 255, 255))
        alpha_composite = PIL.Image.alpha_composite(
            bg, img).convert('RGB').save(outfile, 'JPEG', quality=95)
        imlist.append(
            vipy.image.ImageDetection(filename=outfile,
                                      xmin=0,
                                      ymin=0,
                                      width=112,
                                      height=112).rgb())

    f_montage = './test_whitebox_layerwise_ebp.jpg'
    img_montage = vipy.visualize.montage(imlist,
                                         112,
                                         112,
                                         grayscale=False,
                                         skip=False,
                                         border=1)
    vipy.util.imwrite(img_montage, f_montage)
    print(
        '[test_whitebox.layerwise_ebp]: Saving montage (rowwise by layers, approaching the image layer in bottom right) to "%s"'
        % f_montage)
Example #8
def ebp():
    """Excitation backprop in pytorch"""

    # Create whitebox object with STR-Janus resnet-101 convolutional network
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    x_probe = wb.net.preprocess(PIL.Image.open('../data/demo_face.jpg'))

    # Generate Excitation backprop (EBP) saliency map at first convolutional layer
    P = torch.zeros((1, wb.net.num_classes()))
    P[0][0] = 1.0  # one-hot prior probability
    img_saliency = wb.ebp(x_probe, P)

    # Overlay saliency map with display image
    img_display = PIL.Image.open('../data/demo_face.jpg').resize((112, 112))
    outfile = 'test_whitebox_ebp.jpg'
    _blend_saliency_map(img_display, img_saliency).save(outfile)
    print('[test_whitebox.ebp]: saving saliency map blended overlay to "./%s"' % outfile)
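The demos above are standalone functions. A hypothetical driver (not part of the original scripts) that runs the single-image whitebox demos in sequence, assuming the model checkpoint and demo image paths referenced above exist, could look like:

if __name__ == '__main__':
    # Hypothetical driver: run each whitebox demo defined above in order.
    ebp()
    contrastive_ebp()
    triplet_ebp()
    truncated_contrastive_triplet_ebp()
    weighted_subtree_triplet_ebp(topk=1, mask='nose')
    layerwise_ebp()
    # figure1() and figure2() appear to belong to the eccv20 figure script and
    # additionally require the VGGFace2 assets used by their helper functions.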