def test_register_backward_hook_to_first_conv_layer(mocker, conv_layer, model):
    """Optimizing a layer must register a backward hook on it exactly once."""
    mocker.spy(conv_layer, 'register_backward_hook')

    ascent = GradientAscent(model)
    ascent.optimize(conv_layer, 0, num_iter=2)

    conv_layer.register_backward_hook.assert_called_once()
def test_optimize_with_custom_input(mocker, conv_layer, model):
    """A user-supplied input tensor is the one forwarded through the model."""
    mocker.spy(model, 'forward')
    ascent = GradientAscent(model)

    # Build a small random RGB image and run it through the same transform
    # pipeline the optimizer expects.
    raw = np.uint8(np.random.uniform(150, 180, (64, 64, 3)))
    tensor = apply_transforms(raw, size=64)

    ascent.optimize(conv_layer, 0, input_=tensor, num_iter=1)

    model.forward.assert_called_with(tensor)
def test_remove_any_hooks_before_registering(mocker, conv_layer, model):
    """Forward hooks must be moved to the current target layer between runs."""
    another_conv_layer = model[10]
    for layer in (conv_layer, another_conv_layer):
        mocker.spy(layer, 'register_forward_hook')
        mocker.spy(layer, 'register_backward_hook')

    ascent = GradientAscent(model)
    ascent.optimize(conv_layer, 0, num_iter=2)          # first conv layer
    ascent.optimize(another_conv_layer, 1, num_iter=2)  # then another one

    # Backward hook is registered twice, as we always retrieve gradients from
    # it, but forward hook is registered only once per target layer.
    conv_layer.register_forward_hook.assert_called_once()
    assert conv_layer.register_backward_hook.call_count == 2
    another_conv_layer.register_forward_hook.assert_called_once()
# Visualize a hand-picked set of filters from one conv layer per VGG block.
filter_selection = [
    (model.features[2], [17, 33, 34, 57], 'conv1_2'),
    (model.features[5], [27, 40, 68, 73], 'conv2_1'),
    (model.features[10], [31, 61, 147, 182], 'conv3_1'),
    (model.features[17], [238, 251, 338, 495], 'conv4_1'),
    (model.features[24], [45, 271, 363, 409], 'conv5_1'),
]

g_ascent = GradientAscent(model.features, use_gpu=True)

for layer, filters, name in filter_selection:
    g_ascent.visualize(layer, filters, title=name)
    plt.show()

# Without an explicit filter list the visualizer chooses filters itself
# (per the title: randomly selected).
g_ascent.visualize(model.features[24],
                   title='Randomly selected filters from conv5_1')
plt.show()
def g_ascent(model):
    """Fixture: a GradientAscent instance with a reduced image size."""
    ascent = GradientAscent(model)
    ascent.img_size = 64  # smaller optimization canvas keeps tests fast
    return ascent
def _render_pair(left, x, figsize=(40, 40)):
    """Show `left` (saliency/mask array) beside the input image via Streamlit."""
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(131)
    ax.imshow(np.asarray(left))
    ax = fig.add_subplot(132)
    ax.imshow(np.asarray(x.cpu().squeeze().permute(1, 2, 0).detach().numpy()))
    st.pyplot(fig)


def _attribution_input(layers):
    """Common prologue of the attribution branches: clear the Streamlit cache,
    let the user pick a layer, and fetch one example batch."""
    caching.clear_cache()
    saliency_layer = st.selectbox("Select Layer:", tuple(layers))
    _, x, category_id, _ = get_example_data()
    return saliency_layer, x, category_id


def app(model=None, in_dist_name="in", ood_data_names=None, image=True):
    """Render the Incepto dashboard: intro text, dataset selector, and the
    chosen visualization (attribution map, distribution plot, or filter
    visualization).

    Args:
        model: torch model to visualize; defaults to an eval-mode resnet18.
            (Lazy default — the old eager default built the model at import
            time; the old list default was shared across calls.)
        in_dist_name: label for the in-distribution dataset option.
        ood_data_names: labels for the OOD dataset options (default
            ["out", "out2"]).
        image: if True, offer image-specific visualizations.
    """
    if model is None:
        model = torchvision.models.resnet18().eval()
    if ood_data_names is None:
        ood_data_names = ["out", "out2"]

    # Render the readme as markdown using st.markdown.
    st.markdown(get_file_content_as_string("Incepto/dashboard/intro.md"))
    layers = get_layers(model)

    if st.sidebar.button("Go to Guide"):
        caching.clear_cache()
        st.markdown(get_file_content_as_string("Incepto/dashboard/details.md"))

    st.sidebar.title("Data Settings")
    # NOTE(review): `dataset` is rendered but never read below — confirm intent.
    dataset = st.sidebar.selectbox(
        "Set Dataset:",
        (in_dist_name, *ood_data_names),
    )

    # NOTE(review): "Excitation Backpropgation" typo kept byte-for-byte; it is
    # a runtime option value (no branch below handles it either — confirm).
    if image:
        visualization = st.sidebar.selectbox(
            "Set Visualization Type:",
            ("-", "Color Distribution for Entire Dataset",
             "Pixel Distribution for Entire Dataset", "Deconvolution",
             "Excitation Backpropgation", "Gradient", "Grad-CAM",
             "Guided Backpropagation", "Linear Approximation",
             "Extremal Perturbation", "RISE"),
        )
    else:
        visualization = st.sidebar.selectbox(
            "Set Visualization Type:",
            ("-", "Average Signal for Entire Dataset", "Deconvolution",
             "Excitation Backpropgation", "Gradient", "Grad-CAM",
             "Guided Backpropagation", "Linear Approximation",
             "Extremal Perturbation", "RISE"),
        )

    if image:
        if visualization == "Deconvolution":
            saliency_layer, x, category_id = _attribution_input(layers)
            saliency = deconvnet(model, x.cpu(), category_id,
                                 saliency_layer=saliency_layer)
            # Original branch plotted the raw squeezed tensor (no detach).
            _render_pair(saliency.squeeze(), x)
        elif visualization == "Grad-CAM":
            with st.spinner("Generating Plot"):
                saliency_layer, x, category_id = _attribution_input(layers)
                # NOTE(review): this branch calls linear_approx, not a
                # Grad-CAM implementation — looks like a copy/paste slip but
                # left unchanged here; confirm against the imports.
                saliency = linear_approx(model, x.cpu(), category_id,
                                         saliency_layer=saliency_layer)
                _render_pair(saliency.squeeze().detach().numpy(), x)
        elif visualization == "Guided Backpropagation":
            with st.spinner("Generating Plot"):
                saliency_layer, x, category_id = _attribution_input(layers)
                saliency = guided_backprop(model, x.cpu(), category_id,
                                           saliency_layer=saliency_layer)
                _render_pair(saliency.squeeze().detach().numpy(), x)
        elif visualization == "Gradient":
            with st.spinner("Generating Plot"):
                saliency_layer, x, category_id = _attribution_input(layers)
                saliency = gradient(model, x.cpu(), category_id,
                                    saliency_layer=saliency_layer)
                _render_pair(saliency.squeeze().detach().numpy(), x)
        elif visualization == "Linear Approximation":
            with st.spinner("Generating Plot"):
                saliency_layer, x, category_id = _attribution_input(layers)
                # Fixed: this branch previously called gradient(), duplicating
                # the "Gradient" option; it now uses linear_approx.
                saliency = linear_approx(model, x.cpu(), category_id,
                                         saliency_layer=saliency_layer)
                _render_pair(saliency.squeeze().detach().numpy(), x)
        elif visualization == "Extremal Perturbation":
            with st.spinner("Generating Plot"):
                caching.clear_cache()
                _, x, category_id, _ = get_example_data()
                masks_1, _ = extremal_perturbation(
                    model, x.cpu(), category_id,
                    reward_func=contrastive_reward,
                    debug=False,
                    areas=[0.12],
                )
                _render_pair(masks_1.squeeze().detach().numpy(), x)
        elif visualization == "RISE":
            with st.spinner("Generating Plot"):
                caching.clear_cache()
                _, x, category_id, _ = get_example_data()
                saliency = rise(model, x.cpu())
                # Keep only the map for the example's class.
                saliency = saliency[:, category_id].unsqueeze(0)
                _render_pair(saliency.squeeze().detach().numpy(), x)
        elif visualization == "Color Distribution for Entire Dataset":
            with st.spinner("Generating Plot"):
                caching.clear_cache()
                _, x, category_id, _ = get_example_data()
                x = sum(x) / len(x)  # mean image over the batch
                mean_img = x.cpu().detach().numpy()  # was `image`: shadowed the flag
                fig = plt.figure()
                mpl.rcParams.update({'font.size': 15})
                # One histogram per RGB channel (color strings kept as-is).
                for channel, color in enumerate(('red', 'Green', 'Blue')):
                    plt.hist(mean_img[:, :, channel].ravel(),
                             bins=256, color=color, alpha=0.5)
                plt.xlabel('Intensity Value')
                plt.ylabel('Count')
                plt.legend(['Red_Channel', 'Green_Channel', 'Blue_Channel'])
                st.pyplot(fig)
        elif visualization == "Pixel Distribution for Entire Dataset":
            with st.spinner("Generating Plot"):
                caching.clear_cache()
                _, x, category_id, _ = get_example_data()
                x = sum(x) / len(x)  # mean image over the batch
                fig = plt.figure(figsize=(40, 40))
                plt.ylabel("Count")
                plt.xlabel("Intensity Value")
                mpl.rcParams.update({'font.size': 55})
                pixels = x.cpu().detach().numpy().ravel()
                plt.hist(pixels, bins=256)
                # Variance of the Laplacian — a common blur/sharpness measure.
                vlo = cv2.Laplacian(pixels, cv2.CV_32F).var()
                plt.text(1, 1, ('Variance of Laplacian: ' + str(vlo)),
                         style='italic',
                         bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})
                st.pyplot(fig)
                mpl.rcParams.update({'font.size': 15})  # restore global font

    if st.sidebar.button("Visualize Model"):
        saliency_layer = st.selectbox("Select Layer:", tuple(layers))
        # Renamed from `filter` (shadowed the builtin).
        filter_idx = st.number_input(label="Enter a filter number:",
                                     step=1, min_value=1, value=1)
        g_ascent = GradientAscent(model)
        g_ascent.use_gpu = False
        # NOTE(review): the selected layer name is only used as the plot
        # title; the visualized layer is hard-wired to model.conv1. A
        # redundant exec("layer = model.conv1") — a no-op on locals in
        # Python 3 — and a debug print were removed here.
        layer = model.conv1
        img = g_ascent.visualize(layer, filter_idx, title=saliency_layer,
                                 return_output=True)[0][0][0]
        fig = plt.figure(figsize=(40, 40))
        ax = fig.add_subplot(131)
        ax.imshow(np.asarray(img.cpu().detach().numpy()))
        st.pyplot(fig)