def process_a_batch(image_paths, target_layer, arch, topk, output_dir, cuda):
    """Run Grad-CAM over one batch of images and save the heatmaps.

    For each of the top-``topk`` predicted classes per image, a Grad-CAM
    map for ``target_layer`` is written to ``output_dir``. All large
    intermediates are explicitly deleted afterwards so the CUDA cache can
    actually be released between batches.

    Args:
        image_paths: list of image file paths forming the batch.
        target_layer: layer name passed to ``GradCAM.generate``.
        arch: torchvision model name (key into ``models.__dict__``).
        topk: number of top classes to visualize per image.
        output_dir: directory where PNGs are written.
        cuda: flag forwarded to ``get_device``.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Images (preprocessed tensors only; raw images are not needed here)
    images = load_images_only(image_paths)
    images = torch.stack(images).to(device)

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    # =========================================================================
    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for i in range(topk):
        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)
        regions = regions.cpu().numpy()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(
                image_paths[j], classes[ids[j, i]], probs[j, i]))

            # Grad-CAM.
            # FIX: use fixed-point "{:.3f}" for the probability in the
            # filename; the general "{:.3}" form switches to scientific
            # notation for small values (e.g. "1.23e-05"), producing
            # awkward file names.
            # NOTE(review): ids[j, i] is presumably a 0-dim tensor, so it
            # renders as "tensor(N)" in the name — confirm whether the raw
            # integer is wanted instead.
            save_cam(
                filename=os.path.join(
                    output_dir,
                    "{}-{}-{}-{:.3f}.png".format(
                        os.path.basename(image_paths[j]),
                        classes[ids[j, i]],
                        ids[j, i],
                        probs[j, i],
                    ),
                ),
                gcam=regions[j, 0])

    bp.remove_hook()
    gcam.remove_hook()

    # Drop every reference before emptying the cache so the allocator can
    # actually return the memory to the device.
    del bp
    del images
    del gcam
    del model
    del regions
    del probs
    del ids
    del _
    torch.cuda.empty_cache()
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images

    Runs Vanilla Backpropagation, Deconvolution, Grad-CAM, Guided
    Backpropagation and Guided Grad-CAM on a pretrained torchvision model
    and saves each visualization as a PNG under ``output_dir``.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Images (preprocessed tensors + raw images for heatmap overlay)
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    for i in range(topk):
        # Backprop w.r.t. the i-th most probable class of every image.
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM: element-wise product of the Grad-CAM map
            # and the guided-backprop gradients.
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
    # NOTE(review): gcam/gbp hooks are never removed here, unlike bp and
    # deconv above — confirm whether that is intentional (the model is
    # function-local, so the hooks die with it).
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, checkpoint, distribute):
    """
    Visualize model responses given multiple images

    Variant that loads weights from a local checkpoint instead of the
    torchvision pretrained weights. ``checkpoint`` is a directory prefix;
    ``distribute > 0.5`` signals that the checkpoint was saved from a
    DataParallel/distributed model and its keys carry a ``module.`` (or
    ``module.1.``) prefix that must be stripped before loading.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision (architecture only; weights come from the
    # checkpoint below)
    model = models.__dict__[arch]()
    model = model.cuda()
    # print(model)
    checkpoint = checkpoint + arch + '/model_best.pth.tar'
    # print(checkpoint)
    # Map every storage onto GPU 0 regardless of where it was saved.
    check_point = torch.load(checkpoint, map_location=lambda storage, loc: storage.cuda(0))
    distributed_model = (distribute > 0.5)
    # Hard-coded switch: when True, only the Grad-CAM overlays are saved;
    # the vanilla/deconv/guided outputs are computed but not written.
    only_CAM = True
    if distributed_model == True:
        # create new OrderedDict that does not contain `module.`
        from collections import OrderedDict
        new_check_point = OrderedDict()
        for k, v in check_point['state_dict'].items():
            # name = k[7:] # remove `module.`
            # name = k[9:] # remove `module.1.`
            if k.startswith('module.1.'):
                name = k[9:]
            else:
                name = k[7:]
            new_check_point[name] = v
        # load params
        model.load_state_dict(new_check_point)
    else:
        model.load_state_dict(check_point['state_dict'])
    model.to(device)
    model.eval()

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-deconvnet-{}.png".format(
                            j, arch, classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            # Guided Backpropagation
            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

            # Grad-CAM — always saved (not gated by only_CAM).
            # NOTE(review): osp.splitext(image_paths[j])[0] keeps any
            # directory components of the input path, so the joined output
            # path may point into a non-existent subdirectory of
            # output_dir — confirm whether osp.basename was intended.
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-{}.png".format(
                        # j, arch, target_layer, classes[ids[j, i]] image_path
                        osp.splitext(image_paths[j])[0], arch, target_layer),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-guided_gradcam-{}-{}.png".format(
                            j, arch, target_layer, classes[ids[j, i]]),
                    ),
                    gradient=torch.mul(regions, gradients)[j],
                )
def fulltest(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images

    Variant wired to a project checkpoint: loads weights from
    ``config.weights + 'ct-cn/model_best.pth.tar'`` into a model built by
    ``make_model`` and runs the full visualization pipeline.
    """
    fold = 0  # NOTE(review): never used below — confirm it can be removed
    device = get_device(cuda)
    # Synset words
    #classes = {0:'normal',1:'covid'}
    classes = get_classtable()
    # Model
    #model = models.resnet34(pretrained=False,num_classes=config.num_classes)
    best_model = torch.load(config.weights + 'ct-cn/' + 'model_best.pth.tar')
    print(best_model["state_dict"].keys())
    model = make_model(arch, num_classes=config.num_classes, pretrained=True)
    #best_model = torch.load(config.weights +'model_best.pth.tar')
    model.load_state_dict(best_model["state_dict"])
    # NOTE(review): keys are printed twice (before and after loading) —
    # presumably debug output; confirm whether both prints are needed.
    print(best_model["state_dict"].keys())
    model.to(device)
    model.eval()

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted
    print(probs)
    print(ids)

    for i in range(topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
def main(image_path, target_layer, arch, topk, cuda):
    """Visualize an MGN re-ID model over a hard-coded list of query images.

    NOTE(review): the ``image_path`` and ``target_layer`` parameters are
    both shadowed below (``image_path`` by the per-file loop variable,
    ``target_layer`` by the ``for target_layer in t`` loop) — callers'
    values are effectively ignored; confirm this is intended.
    """
    device = torch.device(
        "cuda" if cuda and torch.cuda.is_available() else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on the GPU:", torch.cuda.get_device_name(current_device))
    else:
        print("Running on the CPU")

    # Synset words
    classes = list()

    # Model from torchvision
    model = MGN()
    """
    pickle.load = partial(pickle.load, encoding="latin1")
    pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
    checkpoint = torch.load("hacnn_market_xent.pth.tar", pickle_module=pickle)
    #checkpoint = torch.load(args.load_weights)
    #checkpoint = torch.load("hacnn_market_xent.pth.tar")
    pretrain_dict = checkpoint['state_dict']
    model_dict = model.state_dict()
    pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
    """
    # Load weights, keeping only entries whose name and shape match the
    # freshly-built MGN model.
    pretrain_dict = torch.load("model_700.pt")
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
    #model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    place = []
    # Class labels parsed from a synset-style file: take the text after the
    # first space, keep only the first comma-separated alias, underscore it.
    with open("samples/new.txt") as lines:
        for line in lines:
            line = line.strip().split(" ", 1)[1]
            line = line.split(", ", 1)[0].replace(" ", "_")
            classes.append(line)

    # Image preprocessing — build absolute paths to the query images.
    with open("new.txt") as lines:
        for line in lines:
            line = line.strip()
            line = "/mnt/SSD/jzwang/market1501/query/" + line
            place.append(line)

    for line in place:
        image_path = line
        # BGR -> RGB, then resize to the re-ID input size (W=128, H=384).
        raw_image = cv2.imread(image_path)[..., ::-1]
        raw_image = cv2.resize(raw_image, (128, 384))
        image = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])(raw_image).unsqueeze(0)
        image = image.to(device)

        """
        Common usage:
        1. Wrap your model with visualization classes defined in grad_cam.py
        2. Run forward() with an image
        3. Run backward() with a specific class
        4. Run generate() to export result
        """

        # =========================================================================
        print("Vanilla Backpropagation")

        bp = BackPropagation(model=model)
        predictions = bp.forward(image)

        # NOTE(review): the vanilla and deconv gradients below are computed
        # but never saved — presumably kept only to mirror the original
        # demo; confirm they can be dropped.
        for i in range(topk):
            print("[{:.5f}] {}".format(predictions[i][0],
                                       classes[predictions[i][1]]))
            bp.backward(idx=predictions[i][1])
            gradient = bp.generate()

        # Remove all the hook function in the "model"
        bp.remove_hook()

        # =========================================================================
        print("Deconvolution")

        deconv = Deconvnet(model=model)
        _ = deconv.forward(image)

        for i in range(topk):
            print("[{:.5f}] {}".format(predictions[i][0],
                                       classes[predictions[i][1]]))
            deconv.backward(idx=predictions[i][1])
            gradient = deconv.generate()

        deconv.remove_hook()

        # =========================================================================
        print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM")

        gcam = GradCAM(model=model)
        _ = gcam.forward(image)

        gbp = GuidedBackPropagation(model=model)
        _ = gbp.forward(image)

        # MGN part branches to visualize.
        t = ['p1', 'p2', 'p3']
        for i in range(topk):
            print("[{:.5f}] {}".format(predictions[i][0],
                                       classes[predictions[i][1]]))

            # Grad-CAM
            for target_layer in t:
                gcam.backward(idx=predictions[i][1])
                #print("1")
                region = gcam.generate(target_layer=target_layer)
                #print(2)
                # Reduce the path to its basename for the output file name.
                line = line.strip().split("/")[-1]
                save_gradcam(
                    "results/{}-gradcam-{}.png".format(line, target_layer),
                    region,
                    raw_image,
                )
                #print(3)

            # Guided Backpropagation
            gbp.backward(idx=predictions[i][1])
            gradient = gbp.generate()

            # Guided Grad-CAM — NOTE(review): `output` is computed but
            # never written to disk; `region` here is the map from the last
            # target layer only. Confirm whether a save call is missing.
            h, w, _ = gradient.shape
            region = cv2.resize(region, (w, h))[..., np.newaxis]
            output = gradient * region
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images

    Variant that loads a fine-tuned 9-class ResNet from a hard-coded
    Google-Drive path via ``initialize_model`` and then runs the standard
    visualization pipeline.
    """
    # check if CUDA is available
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU ...')
    else:
        print('CUDA is available! Training on GPU ...')

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    PRE_MODEL_DIR = '/content/gdrive/My Drive/UnB/TCC-1/TCC1-1-dataset-final/restnet_model152_trained_exp7.pt'
    model_name = 'resnet'
    num_classes = 9
    feature_extract = False
    model, input_size = initialize_model(model_name, num_classes,
                                         feature_extract, use_pretrained=True)
    # Load the checkpoint on GPU when available, otherwise map it to CPU.
    if train_on_gpu:
        state = torch.load(PRE_MODEL_DIR)
    else:
        state = torch.load(PRE_MODEL_DIR, map_location='cpu')

    # Loading weights in restnet architecture
    model.load_state_dict(state['state_dict'])
    model.to(device)
    model.eval()

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
def visualization(image_paths, model_name, model_path, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images

    Loads a model from the dynamically imported module ``model_name``
    (via its ``make_model`` factory), runs the standard visualization
    pipeline over every ``*.jpg`` in the directory ``image_paths[0]``,
    labelling outputs with raw class ids (no synset names), and finally
    dumps a per-channel feature-map grid for ``target_layer``.
    """
    device = get_device(cuda)

    # Synset words (unused below — outputs are labelled with raw ids)
    classes = get_classtable()

    # Model
    kwargs = {}
    module = import_module(model_name)
    model = module.make_model().to(device)
    # strict=False: tolerate missing/unexpected keys in the checkpoint.
    model.load_state_dict(torch.load(model_path, **kwargs), strict=False)
    #print(model)
    model.to(device)
    model.eval()

    # Images — every .jpg directly under the first path.
    images = []
    raw_images = []
    print("Images:")
    path = gb.glob(image_paths[0] + '/*.jpg')
    index = 0
    for img in path:
        print("\t#{}: {}".format(index, img))
        image, raw_image = preprocess(img)
        images.append(image)
        raw_images.append(raw_image)
        index = index + 1
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)
    #print(probs, ids)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, ids[j, i], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    #"{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                    "{}-{}-vanilla-{}.png".format(j, arch, ids[j, i]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, ids[j, i], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    #"{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j, i]]),
                    "{}-{}-deconvnet-{}.png".format(j, arch, ids[j, i]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        #print(ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)
        #print(regions)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, ids[j, i], probs[j, i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    #"{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                    "{}-{}-guided-{}.png".format(j, arch, ids[j, i]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        #j, arch, target_layer, classes[ids[j, i]]
                        j, arch, target_layer, ids[j, i]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
            #print(regions.shape)

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        #j, arch, target_layer, classes[ids[j, i]]
                        j, arch, target_layer, ids[j, i]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )

    # =========================================================================
    # Per-channel feature-map dump for the target layer (4x4 grid).
    print("Channel Visialization:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)
    feature_map = gcam.channel_visualization(target_layer=target_layer)
    draw_features(4, 4, feature_map, output_dir, target_layer)
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images

    Saves Vanilla Backpropagation, Deconvolution, Grad-CAM, Guided
    Backpropagation and Guided Grad-CAM results for the top-k classes of
    every input image.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Pretrained torchvision model, frozen in eval mode.
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Preprocess every input; keep the raw images for heatmap overlays.
    images = []
    raw_images = []
    print("Images:")
    for idx, path in enumerate(image_paths):
        print("\t#{}: {}".format(idx, path))
        tensor, raw = preprocess(path)
        images.append(tensor)
        raw_images.append(raw)
    images = torch.stack(images).to(device)

    # Common usage:
    # 1. Wrap your model with visualization classes defined in grad_cam.py
    # 2. Run forward() with images
    # 3. Run backward() with a list of specific classes
    # 4. Run generate() to export results

    n_images = len(images)

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for rank in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [rank]])
        gradients = bp.generate()

        # Persist one gradient image per input.
        for idx in range(n_images):
            label = classes[ids[idx, rank]]
            print("\t#{}: {} ({:.5f})".format(idx, label, probs[idx, rank]))
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(idx, arch, label),
                ),
                gradient=gradients[idx],
            )

    # Detach every hook this wrapper registered on the model.
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for rank in range(topk):
        deconv.backward(ids=ids[:, [rank]])
        gradients = deconv.generate()

        for idx in range(n_images):
            label = classes[ids[idx, rank]]
            print("\t#{}: {} ({:.5f})".format(idx, label, probs[idx, rank]))
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(idx, arch, label),
                ),
                gradient=gradients[idx],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        # Guided Grad-CAM = Grad-CAM map (x) guided-backprop gradients.
        guided_gradcam = torch.mul(regions, gradients)

        for idx in range(n_images):
            label = classes[ids[idx, rank]]
            print("\t#{}: {} ({:.5f})".format(idx, label, probs[idx, rank]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(idx, arch, label),
                ),
                gradient=gradients[idx],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        idx, arch, target_layer, label
                    ),
                ),
                gcam=regions[idx, 0],
                raw_image=raw_images[idx],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        idx, arch, target_layer, label
                    ),
                ),
                gradient=guided_gradcam[idx],
            )
def main(image_path, target_layer, arch, topk, cuda):
    """Run all visualizations for a single image and a pretrained model.

    Loads the ImageNet synset labels, classifies ``image_path`` with the
    torchvision model ``arch``, and writes vanilla/deconvnet/Grad-CAM/
    guided-backprop/guided-Grad-CAM images for the top-k classes into
    ``results/``.
    """
    use_gpu = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_gpu else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on the GPU:", torch.cuda.get_device_name(current_device))
    else:
        print("Running on the CPU")

    # Synset words: "<wnid> <name1>, <name2>, ..." -> first alias,
    # spaces replaced with underscores.
    classes = []
    with open("samples/synset_words.txt") as lines:
        for entry in lines:
            entry = entry.strip().split(" ", 1)[1]
            classes.append(entry.split(", ", 1)[0].replace(" ", "_"))

    # Model from torchvision, frozen in eval mode.
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Image preprocessing: BGR -> RGB, square resize, ImageNet normalize.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (224, 224))
    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image = to_tensor(raw_image).unsqueeze(0).to(device)

    # Common usage:
    # 1. Wrap your model with visualization classes defined in grad_cam.py
    # 2. Run forward() with an image
    # 3. Run backward() with a specific class
    # 4. Run generate() to export result

    # =========================================================================
    print("Vanilla Backpropagation")

    bp = BackPropagation(model=model)
    predictions = bp.forward(image)

    for i in range(topk):
        score = predictions[i][0]
        class_id = predictions[i][1]
        label = classes[class_id]
        print("[{:.5f}] {}".format(score, label))

        bp.backward(idx=class_id)
        gradient = bp.generate()
        save_gradient(
            "results/{}-vanilla-{}.png".format(arch, label),
            gradient,
        )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(image)

    for i in range(topk):
        score = predictions[i][0]
        class_id = predictions[i][1]
        label = classes[class_id]
        print("[{:.5f}] {}".format(score, label))

        deconv.backward(idx=class_id)
        gradient = deconv.generate()
        save_gradient(
            "results/{}-deconvnet-{}.png".format(arch, label),
            gradient,
        )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM")

    gcam = GradCAM(model=model)
    _ = gcam.forward(image)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(image)

    for i in range(topk):
        score = predictions[i][0]
        class_id = predictions[i][1]
        label = classes[class_id]
        print("[{:.5f}] {}".format(score, label))

        # Grad-CAM
        gcam.backward(idx=class_id)
        region = gcam.generate(target_layer=target_layer)
        save_gradcam(
            "results/{}-gradcam-{}-{}.png".format(arch, target_layer, label),
            region,
            raw_image,
        )

        # Guided Backpropagation
        gbp.backward(idx=class_id)
        gradient = gbp.generate()

        # Guided Grad-CAM: upsample the CAM to gradient resolution and
        # modulate the guided gradients with it.
        h, w, _ = gradient.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = gradient * region

        save_gradient(
            "results/{}-guided-{}.png".format(arch, label),
            gradient,
        )
        save_gradient(
            "results/{}-guided_gradcam-{}-{}.png".format(
                arch, target_layer, label),
            output,
        )
def main():
    """Load a CIFAR-100 attention model and visualize sample images.

    Builds the validation loader, restores the best checkpoint for
    ``args.model``/``args.attention``, then runs vanilla backprop,
    deconvolution and Grad-CAM over a fixed set of images, saving only
    the Grad-CAM overlays under results/<model>_<attention>/.
    """
    # Dataset
    print('Creating dataset...')
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ])
    valset = torchvision.datasets.CIFAR100(root='./data', train=False,
                                           download=True, transform=transform_val)
    val_loader = DataLoader(valset, batch_size=100, shuffle=False,
                            num_workers=4, pin_memory=True)

    # Model — checkpoint path: <args.checkpoint>/<model>_<attention>/<attention>_best_model.pt
    checkpoint = os.path.join(args.checkpoint, args.model + "_" + args.attention)
    model_path = os.path.join(checkpoint, args.attention + '_' + 'best_model.pt')
    print('Loading model...')
    model = get_model(args.model, 'bn', args.attention)
    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path))
    else:
        raise Exception('Cannot find model', model_path)

    # if torch.cuda.device_count() > 1:
    #     print("Using", torch.cuda.device_count(), "GPUs!")
    #     model = nn.DataParallel(model)
    model.cuda()
    cudnn.benchmark = True
    print('\tModel loaded: ' + args.model)
    print('\tAttention type: ' + args.attention)
    print("\tNumber of parameters: ",
          sum([param.nelement() for param in model.parameters()]))

    result_path = os.path.join('results', args.model + "_" + args.attention)
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # Load the fixed image list to visualize.
    if True:
        image_paths = get_image_links()
        images = []
        raw_images = []
        print("Images:")
        for i, image_path in enumerate(image_paths):
            print("\t#{}: {}".format(i, image_path))
            image, raw_image = preprocess(image_path)
            images.append(image)
            raw_images.append(raw_image)
        images = torch.stack(images).to("cuda")

    model.eval()

    # if False:
    #     summary(model, (3, 32, 32))
    #     return

    # Get sample for evaluate — optional: dump predictions for the fixed
    # SAMPLES of the first validation batch only (note the break).
    GET_SAMPLE = False
    if GET_SAMPLE:
        for i, (inputs, labels) in enumerate(val_loader):
            inputs, labels = (Variable(inputs.cuda()), Variable(labels.cuda()))
            outputs = model(inputs)
            _, preds = outputs.topk(1, 1, True, True)
            for sample in SAMPLES:
                image = get_image(inputs[sample])
                save_image(result_path, image, sample, preds[sample], labels[sample])
            break

    print("Vanilla Backpropagation:")
    # NOTE(review): topk/target_layer are hard-coded here — presumably
    # tuned for this architecture; confirm "layer4.2" exists in every
    # supported args.model.
    topk = 1
    target_layer = "layer4.2"
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, LABELS[ids[j, i]], probs[j, i]))

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, LABELS[ids[j, i]], probs[j, i]))

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, LABELS[ids[j, i]], probs[j, i]))

            # Grad-CAM — only the CAM overlay is saved; the vanilla,
            # deconv and guided gradients above are computed but not
            # written.
            save_gradcam(
                filename=osp.join(
                    result_path,
                    "{}-gradcam-{}-{}.png".format(
                        j, target_layer, LABELS[ids[j, i]]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
    print('Finish!!!')