def visualize(self, model, target_layer, input_img, label, output_path, output_prefix):
    grad_cam = gradcam.GradCam(model, target_layer=target_layer)
    preprocessed_img = GradCamVisualizer._preprocess_img(input_img)
    # Save original image
    preprocessed_img.save(os.path.join(output_path, output_prefix) + "original.png")
    # Generate cam mask
    inputs = torch.from_numpy(np.expand_dims(input_img, 0))
    inputs = inputs.type(torch.FloatTensor)
    cam = grad_cam.generate_cam(inputs, label)
    # Save mask
    gradcam_misc.save_class_activation_images(preprocessed_img, cam, os.path.join(output_path, output_prefix))
    return
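All of the snippets below lean on the same Grad-CAM interface: a GradCam class built from a model (usually with a target layer), a generate_cam(input_tensor, target_class) method that returns a normalized heat map, and a save_class_activation_images(original_image, cam, file_name) helper that overlays and saves it. For orientation, here is a minimal sketch of such a class, assuming the usual hook-based Grad-CAM recipe; the name GradCamSketch, the tensor-hook mechanics, and the bilinear upsampling are illustrative assumptions, not the implementation these scripts import.

import torch.nn.functional as F


class GradCamSketch:
    """Minimal Grad-CAM sketch: hook one conv layer, weight its activations by pooled gradients."""

    def __init__(self, model, target_layer):
        self.model = model.eval()
        self.activations = None
        self.gradients = None
        # Record the layer's output (and the gradient flowing back into it) on every pass
        target_layer.register_forward_hook(self._save_activation)

    def _save_activation(self, module, inputs, output):
        self.activations = output.detach()
        output.register_hook(self._save_gradient)

    def _save_gradient(self, grad):
        self.gradients = grad.detach()

    def generate_cam(self, input_tensor, target_class):
        # Forward pass, then backpropagate only the score of the requested class
        output = self.model(input_tensor)
        self.model.zero_grad()
        output[0, target_class].backward()
        # One weight per feature map: global average of its gradient
        weights = self.gradients.mean(dim=(2, 3), keepdim=True)
        # Weighted sum of the feature maps, ReLU, upsample to the input size, scale to [0, 1]
        cam = F.relu((weights * self.activations).sum(dim=1, keepdim=True))
        cam = F.interpolate(cam, size=input_tensor.shape[2:], mode='bilinear', align_corners=False)
        cam = cam.squeeze().cpu().numpy()
        return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)


# Hypothetical usage with a torchvision AlexNet, hooking its last conv layer:
# cam = GradCamSketch(alexnet, alexnet.features[10]).generate_cam(inputs, label)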
if __name__ == '__main__':
    args = get_parser().parse_args()
    # Get params from the packaged example (original_image and file_name_to_export
    # are reused when saving the result below)
    target_example = 2  # Snake
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) = \
        get_example_params(target_example)
    model = NetFactory.createNet(args.model)
    pretrained_model = model.model
    pretrained_model = models.alexnet(pretrained=True)  # overrides the factory model with ImageNet AlexNet
    augment = False
    use_gpu = False
    target_class = 1
    train_loader, valid_loader, test_loader = load_data(augment, use_gpu)
    dataiter = iter(train_loader)
    vol, label = next(dataiter)
    print(vol.shape)
    # Use the middle slice of the volume as the network input
    prep_img = vol[:, int(vol.shape[1] / 2), :, :]
    # original_image = vol[:, 75, :, :]
    print(prep_img.shape)
    # Grad cam
    grad_cam = GradCam(pretrained_model, target_layer=11)
    # Generate cam mask
    cam = grad_cam.generate_cam(prep_img, target_class)
    # Save mask
    save_class_activation_images(original_image, cam, file_name_to_export)
    print('Grad cam completed')
def getCAM(feature_conv, weight_fc, class_idx):
    # Infer channel and spatial dimensions from the conv feature map
    _, nc, h, w = feature_conv.shape
    fc_fea = weight_fc[class_idx]
    # Collapse the fc weights in groups of four so they line up with the nc conv channels
    fc_fea_sm = np.zeros(nc)
    j = -1
    for i in range(len(fc_fea)):
        if i % 4 == 0:
            j += 1
        fc_fea_sm[j] += fc_fea[i]
    cam = fc_fea_sm.dot(feature_conv.reshape((nc, h * w)))
    cam = cam.reshape(h, w)
    cam = cam - np.min(cam)
    cam_img = cam / np.max(cam)
    return [cam_img]


weight_softmax_params = list(model._modules.get('fc').parameters())
weight_softmax = np.squeeze(weight_softmax_params[0].cpu().data.numpy())
overlay = getCAM(activated_features.features, weight_softmax, class_idx)
imnew = skimage.transform.resize(overlay[0], tensor.shape[1:3])
image = image.resize((tensor.shape[1], tensor.shape[2]), Image.LANCZOS)
print('imshape', image.size, 'imnewshape', imnew.shape)
imname = 'camon' + str(class_idx) + '_' + 'p' + str(pred_class_idx.item()) + '_' + imname
print('predicted_class', pred_class_idx.item())
save_class_activation_images(image, imnew, modelname + '_' + imname)
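The fragment above reads conv activations from an activated_features object that is created outside this excerpt. A common way to obtain it is a small forward-hook wrapper along the lines of the sketch below; the class name SaveFeatures and the layer4 example are assumptions for illustration, not the helper actually used here.

class SaveFeatures:
    """Keep the output of one module every time the forward pass runs."""

    def __init__(self, module):
        self.features = None
        self.hook = module.register_forward_hook(self._hook_fn)

    def _hook_fn(self, module, inputs, output):
        self.features = output.detach().cpu().numpy()

    def remove(self):
        self.hook.remove()


# Hypothetical usage: capture the last conv block of a ResNet, then build the CAM
# activated_features = SaveFeatures(model._modules.get('layer4'))
# _ = model(tensor.unsqueeze(0))  # forward pass fills activated_features.features
# overlay = getCAM(activated_features.features, weight_softmax, class_idx)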
def test_net(net, device):
    test_results = os.path.join(opt.saveroot, 'test_results')
    net.eval()
    images_student = np.zeros((1, opt.input_size[2], opt.input_size[0], opt.input_size[1]))
    images_teacher = np.zeros((1, opt.input_size[2], opt.input_size[0], opt.input_size[1]))
    resultlist = []
    gtlist = []
    namelist = []
    num = 0
    studentmetric = np.zeros([4, 4])
    for classnum in range(1, 5):
        dirs = os.listdir(os.path.join(opt.dataroot, 'Test', 'S', str(classnum)))
        dirs = natsort.natsorted(dirs)
        for testfile in dirs:
            images_student[0, :, :, :] = np.array(imageio.imread(
                os.path.join(opt.dataroot, 'Test', 'S', str(classnum), testfile))).transpose(2, 0, 1)
            images_teacher[0, :, :, :] = np.array(imageio.imread(
                os.path.join(opt.dataroot, 'Test', 'T', str(classnum), testfile))).transpose(2, 0, 1)
            images_s = torch.from_numpy(images_student)
            images_t = torch.from_numpy(images_teacher)
            images_s = images_s.to(device=device, dtype=torch.float32)
            images_t = images_t.to(device=device, dtype=torch.float32)
            start_time = time.time()
            if opt.Network_mode == 'S':
                pred, _ = net(images_s)
            if opt.Network_mode == 'T':
                pred, _ = net(images_t)
            if opt.Network_mode == 'ST':
                pred, _, _, _ = net(images_s, images_t)
            end_time = time.time()
            if opt.print_cam:
                gradcam = grad_cam.GradCam(net.teachernet, 1)
                # Generate cam mask
                cam = gradcam.generate_cam(images_t, target_class=classnum - 1)
                # Save mask
                image_visual = images_t.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                image_visual = Image.fromarray(np.uint8(image_visual), 'RGB')
                save_class_activation_images(
                    image_visual, cam,
                    os.path.join('/home/limingchao/PycharmProjects/untitled/BJ_Classification_pytorch/LXD_result/gradcam/',
                                 str(classnum), testfile))
            pred = torch.argmax(pred, dim=1)
            result = pred.cpu().detach().numpy()[0]
            print(testfile, result, end_time - start_time)
            studentmetric[result, classnum - 1] += 1
            num += 1
            namelist.append(testfile)
            resultlist.append(result)
            gtlist.append(classnum - 1)
    kappa = Kappa.quadratic_weighted_kappa(resultlist, gtlist)
    np.save(os.path.join(opt.saveroot, 'results', opt.backbone + '_' + opt.Network_mode + '.npy'), resultlist)
    np.save(os.path.join(opt.saveroot, 'results', 'namelist.npy'), namelist)
    np.save(os.path.join(opt.saveroot, 'results', 'gtlist.npy'), gtlist)
    acc = (studentmetric[0, 0] + studentmetric[1, 1] + studentmetric[2, 2] + studentmetric[3, 3]) / num
    print(studentmetric)
    print('acc:', acc)
    print('kappa:', kappa)
    # Plot the confusion matrix (rows: predicted class, columns: ground-truth class)
    ax = sns.heatmap(studentmetric,
                     cmap="Blues",              # main colour scheme
                     xticklabels=[1, 2, 3, 4],  # column labels (ground-truth class)
                     yticklabels=[1, 2, 3, 4],  # row labels (predicted class)
                     linewidths=.5,             # gap between cells
                     square=True,               # square cells
                     fmt="g",                   # print each cell value in full
                     annot=True)                # write the count in every cell
    # Compatibility fix: some newer matplotlib versions clip the top and bottom heatmap rows
    b, t = plt.ylim()
    b += 0.5
    t -= 0.5
    plt.ylim(b, t)
    # Needed outside IPython to actually display the figure
    plt.show()
with torch.no_grad():
    net.zero_grad()
    output = net(torch.unsqueeze(combined_image, 0).to(device))
    output_softmax = softmax(output)
    pred_prob = output_softmax[:, 1]
    print("Probability of surgery: {:6.3f}".format(float(pred_prob)))
    target_class = 1 if pred_prob >= 0.5 else 0

original_image_sag = get_example_params(img_path_sag)
original_image_trans = get_example_params(img_path_trans)

# Grad cam (run outside the no_grad block so generate_cam can backpropagate)
sag_filename_id = os.path.join(outdir, sag_path)
trans_filename_id = os.path.join(outdir, trans_path)

grad_cam = GradCam(net, view='sag')
# Generate cam mask
cam = grad_cam.generate_cam(combined_image, target_class)
# Save mask
save_class_activation_images(original_image_sag, cam, sag_filename_id)

grad_cam = GradCam(net, view='trans')
# Generate cam mask
cam = grad_cam.generate_cam(combined_image, target_class)
# Save mask
save_class_activation_images(original_image_trans, cam, trans_filename_id)

print('Grad cam completed')
val_loader = DataLoader(val_data, batch_size=1, shuffle=False, num_workers=6)
for i, (data, labels_pro, img_path) in enumerate(val_loader):
    if i > 10:
        break
    img_path = img_path[0]
    original_image = read_image(img_path)
    original_image = original_image.resize((192, 64))
    file_name_to_export = img_path[img_path.rfind('/') + 1:img_path.rfind('.')]
    # Grad cam
    grad_cam = GradCam(pretrained_model)
    # Generate cam mask single
    # cam = grad_cam.generate_cam(
    #     data, labels_pro, num_classes=len(cfg.alphabets))
    for l in range(3, 26):
        cam = grad_cam.generate_cam(data, labels_pro, num_classes=len(cfg.alphabets), nl=l)
        # Save mask
        save_class_activation_images(
            original_image, cam,
            "AM_" + str(l) + "_" + file_name_to_export,
            file_name_to_export.split('_')[0])
print('Grad cam completed')
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

for model_name in models:
    print(model_name)
    Config.backbone = model_name
    model = MainModel(Config)
    model_dict = model.state_dict()
    pretrained_dict = torch.load(weights[model_name])
    # Strip the 'module.' prefix added by DataParallel and keep only matching keys
    pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    model.eval()

    grad_cam = GradCam(model)
    save_path = os.path.join('./good_case_2/{}/'.format(model_name))
    for img in tqdm(imgs):
        original_image = pil_loader(img)
        img_name = img.split('/')[-1].split('.')[0]
        target_class = int(img_name.split('_')[0])
        img_tensor = transformer(original_image).unsqueeze(0)
        cam, pred_class = grad_cam.generate_cam(img_tensor, target_class)
        resize_img = original_image.resize((448, 448))
        # Join the top-3 predictions into a string usable in the output file name
        string_pred_class = '-'.join([str(pred_class[i]) for i in range(3)])
        save_class_activation_images(
            resize_img, cam,
            img_name + '_pred{}'.format(string_pred_class),
            save_path)
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')