else: comp_idx = WBC_id item_id = (np.where(idx.cpu().numpy() == (WBC_id)))[0][0] gcam.backward(idx=comp_idx) output = gcam.generate(target_layer = 'layer4.2') # for resnet heatmap = output original = inputs.data.cpu().numpy() original = np.transpose(original, (0,2,3,1))[0] original = original * cf.std + cf.mean original = np.uint8(original * 255.0) mask = np.uint8(heatmap * 255.0) blank_heatmap[:, (mode*160):480+(mode*160)] = cv2.resize(heatmap, (480, 480)) mode += 1 blank_heatmap[blank_heatmap > 1] = 1 blank_heatmap = cv2.GaussianBlur(blank_heatmap, (15, 15), 0) blank_mask = np.uint8(blank_heatmap * 255.0) check_and_mkdir("./results/BCCD/heatmaps") check_and_mkdir("./results/BCCD/masks") save_dir = "./results/BCCD/heatmaps/" + f mask_dir = "./results/BCCD/masks/" + f gcam.save(save_dir, blank_heatmap, background_img) cv2.imwrite(mask_dir, blank_mask)
probs, idx = gcam.forward(inputs) #probs, idx = gbp.forward(inputs) # Grad-CAM gcam.backward(idx=WBC_id) output = gcam.generate(target_layer='layer4.2') # Guided Back Propagation #gbp.backward(idx=WBC_id) #feature = gbp.generate(target_layer='conv1') # Guided Grad-CAM #output = np.multiply(feature, region) gcam.save('./results/%s.png' % str(i), output, cropped) cv2.imwrite('./results/map%s.png' % str(i), cropped) for j in range(3): print('\t{:5f}\t{}\n'.format(probs[j], dset_classes[idx[j]])) """ @ Code for extracting the Top-3 Results for each image topk = 3 for i in range(0, topk): gcam.backward(idx=idx[i]) output = gcam.generate(target_layer='layer4.2') gcam.save('./results/{}_gcam.png'.format(dset_classes[idx[i]]), output, center_cropped) print('\t{:.5f}\t{}'.format(probs[i], dset_classes[idx[i]])) """
# Backprop w.r.t. the chosen class index, then build the Grad-CAM heatmap from
# the last ResNet block.
# NOTE(review): relies on surrounding context (`comp_idx`, `args`, `file_name`,
# `in_size`, `f`) defined outside this span.
gcam.backward(idx=comp_idx)
output = gcam.generate(target_layer = 'layer4.2') # for resnet
heatmap = output

# Read the original image back from disk instead of un-normalizing the tensor.
#original = inputs.data.cpu().numpy()
#original = np.transpose(original, (0,2,3,1))[0]
#original = original * cf.std + cf.mean
#original = np.uint8(original * 255.0)
original = cv2.imread(file_name)

if (args.preprocess == 'pad'):
    # Padded input: upscale the heatmap, then crop away the vertical padding
    # (rows 80..559) so it aligns with the un-padded original.
    heatmap = cv2.resize(heatmap, (640, 640))
    heatmap = heatmap[80:560, :]
else:
    # Otherwise resize the original to the network input size instead.
    original = cv2.resize(original, (in_size, in_size))

#heatmap[heatmap < 0.7] = 0
#heatmap = cv2.GaussianBlur(heatmap, (15, 15), 0)
# 8-bit grayscale mask derived from the [0, 1] heatmap.
mask = np.uint8(heatmap * 255.0)

# Ensure output directories exist, then save overlay and mask named after `f`.
check_and_mkdir("./results/BCCD/heatmaps")
check_and_mkdir("./results/BCCD/masks")

save_dir = "./results/BCCD/heatmaps/" + f
mask_dir = "./results/BCCD/masks/" + f

gcam.save(save_dir, heatmap, original)
cv2.imwrite(mask_dir, mask)
# Once every heatmap in the list has been stitched onto the canvas,
# post-process it, write heatmap + mask to disk, and terminate.
if (img_cnt >= len(heatmap_lst)):
    check_and_mkdir('./results/%s' % cf.name)
    check_and_mkdir('./results/%s/heatmaps/' % cf.name)
    check_and_mkdir('./results/%s/masks/' % cf.name)

    # Overlapping tiles can push activations above 1: clip, then smooth the
    # seams before converting to an 8-bit mask.
    blank_canvas[blank_canvas > 1] = 1
    blank_canvas = cv2.GaussianBlur(blank_canvas, (15, 15), 0)
    blank_save = np.uint8(blank_canvas * 255.0)

    # Base name of the input image, e.g. "/path/to/img.png" -> "img".
    # Hoisted: previously recomputed four times below.
    base_name = file_name.split(".")[-2].split("/")[-1]

    # BUGFIX: was `args.subtype == None`; identity comparison with `is None`
    # is the correct (PEP 8) singleton test.
    if args.subtype is None:
        save_dir = './results/%s/heatmaps/%s.png' % (cf.name, base_name)
        save_mask = './results/%s/masks/%s.png' % (cf.name, base_name)
    else:
        save_dir = './results/%s/heatmaps/%s_%s.png' % (cf.name, base_name, args.subtype)
        save_mask = './results/%s/masks/%s_%s.png' % (cf.name, base_name, args.subtype)

    # Save the grad-cam results
    print("| Saving Heatmap results... ")
    gcam.save(save_dir, blank_canvas, original_image)  # save heatmaps

    print("| Saving Mask results... ")
    cv2.imwrite(save_mask, blank_save)  # save image masks

    print("| Feature map completed!")
    sys.exit(0)
def main(args):
    """Visualize top-3 predictions of ResNet-152 on one image using Grad-CAM,
    vanilla backpropagation, and guided backpropagation / guided Grad-CAM."""
    # Parse the ImageNet synset file: keep the first human-readable label for
    # each class index, with spaces replaced by underscores.
    idx2cls = list()
    with open('samples/synset_words.txt') as synset_file:
        for raw_line in synset_file:
            label = raw_line.strip().split(' ', 1)[1]
            idx2cls.append(label.split(', ', 1)[0].replace(' ', '_'))

    print('Loading a model...')
    model = torchvision.models.resnet152(pretrained=True)

    # Standard ImageNet preprocessing (tensor conversion + channel-wise norm).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    print('\nGrad-CAM')
    gcam = GradCAM(model=model, target_layer='layer4.2', cuda=args.cuda)
    gcam.load_image(args.image, transform)
    gcam.forward()
    for rank in range(3):
        gcam.backward(idx=gcam.idx[rank])
        cls_name = idx2cls[gcam.idx[rank]]
        heatmap = gcam.generate()
        print('\t{:.5f}\t{}'.format(gcam.prob[rank], cls_name))
        gcam.save('results/{}_gcam.png'.format(cls_name), heatmap)

    print('\nVanilla Backpropagation')
    bp = BackPropagation(model=model, target_layer='conv1', cuda=args.cuda)
    bp.load_image(args.image, transform)
    bp.forward()
    for rank in range(3):
        bp.backward(idx=bp.idx[rank])
        cls_name = idx2cls[bp.idx[rank]]
        gradient = bp.generate()
        print('\t{:.5f}\t{}'.format(bp.prob[rank], cls_name))
        bp.save('results/{}_bp.png'.format(cls_name), gradient)

    print('\nGuided Backpropagation')
    gbp = GuidedBackPropagation(model=model, target_layer='conv1',
                                cuda=args.cuda)
    gbp.load_image(args.image, transform)
    gbp.forward()
    for rank in range(3):
        # Reuse Grad-CAM's ranking so both maps describe the same class.
        cls_idx = gcam.idx[rank]
        cls_name = idx2cls[cls_idx]
        gcam.backward(idx=cls_idx)
        region = gcam.generate()
        gbp.backward(idx=cls_idx)
        pixel_grads = gbp.generate()
        # Min-max normalize the Grad-CAM region to [0, 1], bring it to the
        # input resolution, and expand to 3 channels so it can gate the
        # guided-backprop gradients element-wise (guided Grad-CAM).
        region -= region.min()
        region /= region.max()
        region = cv2.resize(region, (224, 224))
        region = cv2.cvtColor(region, cv2.COLOR_GRAY2BGR)
        combined = pixel_grads * region
        print('\t{:.5f}\t{}'.format(gbp.prob[rank], cls_name))
        gbp.save('results/{}_gbp.png'.format(cls_name), pixel_grads)
        gbp.save('results/{}_ggcam.png'.format(cls_name), combined)
def main(image_path, arch, topk, cuda):
    """Generate Grad-CAM, vanilla-BP and guided-BP/guided Grad-CAM maps for the
    top-k predictions of a pretrained ImageNet model on one image.

    Args:
        image_path: path to the input image file.
        arch: model name; one of 'resnet152', 'vgg19', 'inception_v3'.
        topk: number of top-scoring classes to visualize.
        cuda: request GPU execution (silently falls back to CPU if unavailable).

    Raises:
        ValueError: if `arch` is not a supported architecture.
        IOError: if the image cannot be read from `image_path`.
    """
    # Per-architecture Grad-CAM target layer and expected input resolution.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.35',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
    }.get(arch)
    # BUGFIX: .get() yields None for an unknown arch, which previously crashed
    # later with an opaque TypeError on CONFIG['input_size']. Fail fast instead.
    if CONFIG is None:
        raise ValueError('Unsupported architecture: {!r}'.format(arch))

    cuda = cuda and torch.cuda.is_available()

    # Synset words
    classes = list()
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            classes.append(line)

    # Model
    model = models.__dict__[arch](pretrained=True)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # Image
    raw_image = cv2.imread(image_path)
    # BUGFIX: cv2.imread returns None for a missing/unreadable file; previously
    # this crashed with an opaque TypeError on the slice below.
    if raw_image is None:
        raise IOError('Could not read image: {}'.format(image_path))
    raw_image = raw_image[:, :, ::-1]  # BGR -> RGB
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transform(raw_image).unsqueeze(0)
    image = Variable(image, volatile=False, requires_grad=True)

    if cuda:
        model.cuda()
        image = image.cuda()

    print('1. Grad-CAM')
    gcam = GradCAM(model=model, cuda=cuda)
    probs, idx = gcam.forward(image)
    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])
        gcam.save('results/{}_gcam_{}.png'.format(classes[idx[i]], arch), output, raw_image)  # NOQA
        print('\t{:.5f}\t{}'.format(probs[i], classes[idx[i]]))

    print('2. Vanilla Backpropagation')
    bp = BackPropagation(model=model, cuda=cuda)
    probs, idx = bp.forward(image)
    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()
        bp.save('results/{}_bp_{}.png'.format(classes[idx[i]], arch), output)
        print('\t{:.5f}\t{}'.format(probs[i], classes[idx[i]]))

    print('3. Guided Backpropagation/Grad-CAM')
    gbp = GuidedBackPropagation(model=model, cuda=cuda)
    probs, idx = gbp.forward(image)
    for i in range(0, topk):
        # Gate per-pixel guided gradients by the class-discriminative Grad-CAM
        # region to obtain guided Grad-CAM.
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])
        gbp.backward(idx=idx[i])
        feature = gbp.generate()
        output = feature * region[:, :, np.newaxis]
        gbp.save('results/{}_gbp_{}.png'.format(classes[idx[i]], arch), feature)  # NOQA
        gbp.save('results/{}_ggcam_{}.png'.format(classes[idx[i]], arch), output)  # NOQA
        print('\t{:.5f}\t{}'.format(probs[i], classes[idx[i]]))