def multiple_saliency_maps(model, tensors, start, num, k=0, guide=True):
    """Plot saliency maps for `num` consecutive images of `tensors`, starting at `start`."""
    backprop = Backprop(model)
    for i in range(start, start + num):
        cur_image = tensors[i, :, :, :].unsqueeze(0).requires_grad_(requires_grad=True)
        backprop.visualize(cur_image, k, guided=guide)
        plt.show()
    print("Range: ", start, " - ", start + num)
    # Return the next unprocessed index so callers can resume from it.
    return start + num
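# A minimal usage sketch for the helper above. The paths, model choice, and
# batch construction are illustrative assumptions, not taken from the original code.
import torch
import torchvision.models as models
from flashtorch.utils import apply_transforms, load_image

def demo_multiple_saliency_maps(paths):
    model = models.alexnet(pretrained=True)
    # apply_transforms yields [1, 3, 224, 224]; concatenate into [N, 3, 224, 224]
    tensors = torch.cat([apply_transforms(load_image(p)) for p in paths], dim=0)
    next_start = multiple_saliency_maps(model, tensors, start=0, num=len(paths), k=24)
    return next_start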
def test_visualize_calls_calculate_gradients_twice(mocker, model):
    backprop = Backprop(model)
    mocker.spy(backprop, 'calculate_gradients')

    top_class = 5
    target_class = 5
    input_ = torch.zeros([1, 3, 224, 224])

    make_expected_gradient_target(top_class)
    make_mock_output(mocker, model, target_class)

    backprop.visualize(input_, target_class, use_gpu=True)

    # visualize() renders both a full-colour and a channel-maxed gradient map,
    # so the gradients are calculated twice per call.
    assert backprop.calculate_gradients.call_count == 2
def di_saliency():
    hockey_path_violence = '/media/david/datos/Violence DATA/HockeyFights/frames/violence'
    hockey_path_noviolence = '/media/david/datos/Violence DATA/HockeyFights/frames/nonviolence'
    datasetAll, labelsAll, numFramesAll = createDataset(hockey_path_violence, hockey_path_noviolence)  # ordered
    print(len(datasetAll), len(labelsAll), len(numFramesAll))

    input_size = 224
    data_transforms = createTransforms(input_size)
    dataset_source = 'frames'
    debugg_mode = False
    avgmaxDuration = 1.66
    interval_duration = 0.3
    numDiPerVideos = 1
    batch_size = 1
    num_workers = 1

    path = os.path.join('models', 'alexnet-frames-Finetuned:True-1di-tempMaxPool-OnPlateau.tar')
    # Alternative: rebuild the architecture and load only the weights instead of
    # unpickling the whole model object:
    # model, input_size = initialize_model(model_name='alexnet', num_classes=2,
    #                                      feature_extract=False, numDiPerVideos=1,
    #                                      joinType='OnPlateau', use_pretrained=True)
    # model.load_state_dict(torch.load(path))
    model = torch.load(path)
    model.cuda()
    backprop = Backprop(model)

    image_datasets = {
        'train': ViolenceDatasetVideos(
            dataset=datasetAll, labels=labelsAll,
            spatial_transform=data_transforms['train'], source=dataset_source,
            interval_duration=interval_duration, difference=3,
            maxDuration=avgmaxDuration, nDynamicImages=numDiPerVideos,
            debugg_mode=debugg_mode),
        'test': ViolenceDatasetVideos(
            dataset=datasetAll, labels=labelsAll,
            spatial_transform=data_transforms['test'], source=dataset_source,
            interval_duration=interval_duration, difference=3,
            maxDuration=avgmaxDuration, nDynamicImages=numDiPerVideos,
            debugg_mode=debugg_mode),
    }
    dataloaders_dict = {
        'train': torch.utils.data.DataLoader(
            image_datasets['train'], batch_size=batch_size,
            shuffle=False, num_workers=num_workers),
        'test': torch.utils.data.DataLoader(
            image_datasets['test'], batch_size=batch_size,
            shuffle=False, num_workers=num_workers),
    }

    count = 0
    max_plots = 5
    for inputs, labels in dataloaders_dict['test']:
        count += 1
        if count > max_plots:
            break
        print('*' * 12)
        print('inputs size: ', inputs.size())
        # inputs = inputs.permute(1, 0, 2, 3, 4)
        inputs = inputs.cuda()
        # NOTE: stock flashtorch's Backprop.visualize has no `di` argument; this
        # call assumes a fork extended to handle dynamic-image inputs.
        backprop.visualize(inputs, target_class=None, guided=False, use_gpu=True, di=True)
def test_visualize_passes_gpu_flag(mocker, model):
    backprop = Backprop(model)
    mocker.spy(backprop, 'calculate_gradients')

    top_class = 5
    target_class = 5
    input_ = torch.zeros([1, 3, 224, 224])

    make_expected_gradient_target(top_class)
    make_mock_output(mocker, model, target_class)

    backprop.visualize(input_, target_class, use_gpu=True)

    # mock_calls entries unpack as (name, args, kwargs)
    _, _, kwargs = backprop.calculate_gradients.mock_calls[0]
    assert kwargs['use_gpu']
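# The two tests above rely on the pytest-mock plugin (the `mocker` fixture) and
# on `model`, `make_expected_gradient_target`, and `make_mock_output` from the
# surrounding test module. A minimal sketch of a compatible `model` fixture
# (an assumption about that module, not its actual code):
import pytest
import torchvision.models as models

@pytest.fixture
def model():
    return models.alexnet()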
def visualise_cnn(model_name, image, class_label, model_path=None, title=None):
    model = get_model(model_name)
    if model_path is not None:
        model = load_model(model, model_path)
    model.eval()
    backprop = Backprop(model)

    # Transform the input image to a tensor
    img = apply_transforms(image)

    # Look up the ImageNet class index by label, e.g. 24 for great grey owl
    imagenet = ImageNetIndex()
    target_class = imagenet[class_label]

    # Ready to roll! (`title` is not in stock flashtorch's visualize signature,
    # so this assumes a locally extended Backprop.)
    backprop.visualize(img, target_class, guided=True, title=title)
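# Hypothetical call site for visualise_cnn. The model name, image path, and
# label are illustrative assumptions; get_model/load_model come from the
# surrounding project.
from flashtorch.utils import load_image

image = load_image('images/great_grey_owl.jpg')
visualise_cnn('alexnet', image, 'great grey owl', title='Guided saliency')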
def saliency():
    """### 1. Load an image"""
    buho = 'images/great_grey_owl.jpg'
    image = load_image(buho)
    # plt.imshow(image)
    # plt.title('Original image ' + str(type(image)))
    # plt.axis('off')
    # plt.show()

    """### 2. Load a pre-trained model"""
    model = models.alexnet(pretrained=True)
    # model = torch.load('/content/alexnet-frames-Finetuned:False-1di-tempMaxPool-OnPlateau.tar')

    """### 3. Create an instance of Backprop with the model"""
    backprop = Backprop(model)

    """### 4. Visualize saliency maps"""
    # Transform the input image to a tensor of shape [1, 3, 224, 224]
    owl = apply_transforms(image)

    # Set a target class from the ImageNet task: 24 for great grey owl
    target_class = 24

    # Ready to roll!
    backprop.visualize(owl, target_class=target_class, guided=True, use_gpu=True)
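# Instead of hardcoding 24, the index can be looked up by class name with
# flashtorch's ImageNetIndex (the same helper visualise_cnn uses above); a
# small sketch of that alternative:
from flashtorch.utils import ImageNetIndex

imagenet = ImageNetIndex()
target_class = imagenet['great grey owl']  # expected to resolve to 24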
import matplotlib.pyplot as plt
import pandas as pd
import torch

from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms, load_image

# CXR_BASE and MobileNet are assumed to be defined by the surrounding project.
NIH_CXR_BASE = CXR_BASE.joinpath('nih/v1').resolve()

test_df = pd.read_csv('~/cxr-jingyi/Age/NIH_test_2500.csv')
path1 = test_df.iloc[0]['path']
path1 = NIH_CXR_BASE.joinpath(path1).resolve()

image = load_image(str(path1))
plt.imshow(image)

model = MobileNet(16)
checkpoint = torch.load('/home/jingyi/cxr-jingyi/Age/result/supervised/model_best.pth.tar')
model.load_state_dict(checkpoint['state_dict'])

backprop = Backprop(model)

# Transform the input image to a tensor
input_ = apply_transforms(image)

# Target class for this chest X-ray model (not an ImageNet class index)
target_class = 16

# Ready to roll!
backprop.visualize(input_, target_class, guided=True)
def visualize_helper(model_module, tensor=img, k=854):
    """Instantiate a pretrained model from `model_module` and plot a guided saliency map."""
    model = model_module(pretrained=True).float()
    backprop = Backprop(model)
    backprop.visualize(tensor, k, guided=True)
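# Example invocation, assuming `img` (the default argument above) is a
# preprocessed [1, 3, 224, 224] tensor already in scope; the model choice here
# is illustrative, not from the original code.
import torchvision.models as models

visualize_helper(models.resnet18, tensor=img, k=854)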
import matplotlib.pyplot as plt
import torchvision.models as models

from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms, load_image

image = load_image('G:/EEGNet/test/test.jpg')  # forward slashes avoid backslash-escape issues on Windows
plt.imshow(image)

net = models.vgg16(pretrained=True)
backprop = Backprop(net)

input_ = apply_transforms(image)
target_class = 24  # ImageNet class 24: great grey owl
backprop.visualize(input_, target_class, guided=True)
import os  # NOQA: E402
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # NOQA: E402

import matplotlib.pyplot as plt
import torchvision.models as models

from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms, load_image

### 1. Load a pre-trained model
# model = models.alexnet(pretrained=True)
model = models.vgg16(pretrained=True)

### 2. Create an instance of Backprop with the model
backprop = Backprop(model)

### 3. Visualize a guided saliency map for every image in ./figure
path = './figure'
for name in os.listdir(path):
    peacock = apply_transforms(load_image(os.path.join(path, name)))
    # target_class=None lets Backprop fall back to the model's top prediction
    backprop.visualize(peacock, None, guided=True, use_gpu=True)
    plt.show()
def visualize_saliency(model, tensor, k=1, guide=True):
    backprop = Backprop(model)
    backprop.visualize(tensor, k, guided=guide)
    plt.show()
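# Example usage with illustrative values (assumes a 224x224 ImageNet-style input):
import matplotlib.pyplot as plt
import torchvision.models as models
from flashtorch.utils import apply_transforms, load_image

net = models.vgg16(pretrained=True)
tensor = apply_transforms(load_image('images/great_grey_owl.jpg'))
visualize_saliency(net, tensor, k=24)  # 24: great grey owl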