def visualize(self, inp_data, cmap_name='custom blue', colors=None, N=256, methods=None, signs=None, nt_type='smoothgrad'):
    """Attribute a single input with NoiseTunnel and render it beside the original image.

    Args:
        inp_data: one raw input item; wrapped in a one-element fastai test dataloader.
        cmap_name: name for the matplotlib colormap built from ``colors``.
        colors: colormap anchor list; defaults to a white-to-black ramp.
        N: number of quantization levels for the colormap.
        methods: captum visualization methods (default: ``['original_image', 'heat_map']``).
        signs: attribution signs to display (default: ``['all', 'positive']``).
        nt_type: NoiseTunnel smoothing type passed to ``attribute``.
    """
    # Fix: list defaults were mutable default arguments shared across calls.
    if methods is None:
        methods = ['original_image', 'heat_map']
    if signs is None:
        signs = ["all", "positive"]
    dl = self.dls.test_dl(L(inp_data), with_labels=True, bs=1)
    self.enc_inp, self.enc_preds = dl.one_batch()
    dec_data = dl.decode((self.enc_inp, self.enc_preds))
    self.dec_img, self.dec_pred = dec_data[0][0], dec_data[1][0]
    self.colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')] if colors is None else colors
    # NOTE(review): this reads ``self.dl.device`` although the dataloader built
    # above is the local ``dl`` — confirm ``self.dl`` is assigned elsewhere.
    attributions_ig_nt = self._noise_tunnel.attribute(
        self.enc_inp.to(self.dl.device),
        n_samples=1,
        nt_type=nt_type,
        target=self.enc_preds)
    default_cmap = LinearSegmentedColormap.from_list(cmap_name, self.colors, N=N)
    _ = viz.visualize_image_attr_multiple(
        np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        np.transpose(self.dec_img.numpy(), (1, 2, 0)),
        methods, signs,
        cmap=default_cmap,
        show_colorbar=True,
        titles=[f'Original Image - ({self.dec_pred})', 'Noise Tunnel'])
def get_insights(self, tensor_data, _, target=0):
    """Compute Integrated Gradients for ``tensor_data`` and return the rendered figure bytes."""
    cmap = LinearSegmentedColormap.from_list(
        "custom blue",
        [(0, "#ffffff"), (0.25, "#0000ff"), (1, "#0000ff")],
        N=256,
    )
    # IG against an all-zero baseline; convergence delta is discarded.
    ig_attr, _ = self.attribute_image_features(
        self.ig,
        tensor_data,
        baselines=tensor_data * 0,
        return_convergence_delta=True,
        n_steps=15,
    )
    # Captum expects HWC numpy arrays for rendering.
    attr_hwc = np.transpose(ig_attr.squeeze().cpu().detach().numpy(), (1, 2, 0))
    img_hwc = np.transpose(tensor_data.squeeze().cpu().detach().numpy(), (1, 2, 0))
    ig_fig, _ = viz.visualize_image_attr_multiple(
        attr_hwc,
        img_hwc,
        use_pyplot=False,
        methods=["original_image", "heat_map"],
        cmap=cmap,
        show_colorbar=True,
        signs=["all", "positive"],
        titles=["Original", "Integrated Gradients"],
    )
    ig_bytes = self.output_bytes(ig_fig)
    # Base64-encode any raw byte payloads; pass other rows through untouched.
    output = []
    for row in [ig_bytes]:
        if isinstance(row, (bytes, bytearray)):
            output.append({"b64": b64encode(row).decode("utf8")})
        else:
            output.append(row)
    return output
def visualize(self, inp_data, n_steps=200, cmap_name='custom blue', colors=None, N=256, methods=None, signs=None, outlier_perc=1):
    """Attribute a single input with Integrated Gradients and render it beside the original image.

    Args:
        inp_data: one raw input item; wrapped in a one-element fastai test dataloader.
        n_steps: number of integration steps for Integrated Gradients.
        cmap_name: name for the matplotlib colormap built from ``colors``.
        colors: colormap anchor list; defaults to a white-to-black ramp.
        N: number of quantization levels for the colormap.
        methods: captum visualization methods (default: ``['original_image', 'heat_map']``).
        signs: attribution signs to display (default: ``['all', 'positive']``).
        outlier_perc: percentile of outlying attribution values to clip in the display.
    """
    # Fix: list defaults were mutable default arguments shared across calls.
    if methods is None:
        methods = ['original_image', 'heat_map']
    if signs is None:
        signs = ["all", "positive"]
    dl = self.dls.test_dl([inp_data], with_labels=True, bs=1)
    self.enc_inp, self.enc_preds = dl.one_batch()
    dec_data = dl.decode((self.enc_inp, self.enc_preds))
    self.dec_img, self.dec_pred = dec_data[0][0], dec_data[1][0]
    self.colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')] if colors is None else colors
    # Bug fix: honor the caller-supplied n_steps (was hard-coded to 200).
    self.attributions_ig = self.integrated_gradients.attribute(
        self.enc_inp.to(self.dl.device),
        target=self.enc_preds,
        n_steps=n_steps)
    default_cmap = LinearSegmentedColormap.from_list(cmap_name, self.colors, N=N)
    _ = viz.visualize_image_attr_multiple(
        np.transpose(self.attributions_ig.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        np.transpose(self.dec_img.numpy(), (1, 2, 0)),
        methods=methods,
        cmap=default_cmap,
        show_colorbar=True,
        signs=signs,
        outlier_perc=outlier_perc,
        titles=[f'Original Image - ({self.dec_pred})', 'IG'])
def _viz(self, attributions, dec_data, metric):
    """Render an attribution heat map next to the decoded input image.

    ``metric`` is used as the title of the attribution panel.
    """
    cmap = LinearSegmentedColormap.from_list(self.cmap_name, self.colors, N=self.N)
    # Captum expects HWC numpy arrays for rendering.
    attr_hwc = np.transpose(attributions.squeeze().cpu().detach().numpy(), (1, 2, 0))
    img_hwc = np.transpose(dec_data[0].numpy(), (1, 2, 0))
    _ = viz.visualize_image_attr_multiple(
        attr_hwc,
        img_hwc,
        methods=self.methods,
        cmap=cmap,
        show_colorbar=True,
        signs=self.signs,
        outlier_perc=self.outlier_perc,
        titles=[f'Original Image - ({dec_data[1]})', metric])
def _attr_occlusion(self, input, pred_label_idx, w_size=15):
    """Compute and display Occlusion attributions for one input.

    The sliding window covers all 3 channels and is ``w_size`` pixels square;
    it moves by half the window size per step.
    """
    half = int(w_size / 2)
    occ = Occlusion(self.model)
    attributions = occ.attribute(
        input,
        strides=(3, half, half),
        target=pred_label_idx,
        sliding_window_shapes=(3, w_size, w_size),
        baselines=0)
    attr_hwc = np.transpose(attributions.squeeze().cpu().detach().numpy(), (1, 2, 0))
    img_hwc = np.transpose(input.squeeze().cpu().detach().numpy(), (1, 2, 0))
    _ = viz.visualize_image_attr_multiple(
        attr_hwc,
        img_hwc,
        ["original_image", "heat_map"],
        ["all", "positive"],
        show_colorbar=True,
        outlier_perc=2,
    )
def _attr_noise_tunnel(self, input, pred):
    """Visualize SmoothGrad-squared NoiseTunnel(IntegratedGradients) attributions for one input."""
    nt = NoiseTunnel(IntegratedGradients(self.model))
    cmap = LinearSegmentedColormap.from_list(
        'custom blue',
        [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')],
        N=256)
    attributions = nt.attribute(input, n_samples=10, nt_type='smoothgrad_sq', target=pred)
    attr_hwc = np.transpose(attributions.squeeze().cpu().detach().numpy(), (1, 2, 0))
    img_hwc = np.transpose(input.squeeze().cpu().detach().numpy(), (1, 2, 0))
    _ = viz.visualize_image_attr_multiple(
        attr_hwc,
        img_hwc,
        ["original_image", "heat_map"],
        ["all", "positive"],
        cmap=cmap,
        show_colorbar=True)
def visualize(self, inp_data, cmap_name='custom blue', colors=None, N=256, methods=None, signs=None, strides=(3, 4, 4), sliding_window_shapes=(3, 15, 15), outlier_perc=2):
    """Attribute a single input with Occlusion and render it beside the original image.

    Args:
        inp_data: one raw input item; wrapped in a one-element fastai test dataloader.
        cmap_name: name for the matplotlib colormap built from ``colors``.
        colors: colormap anchor list; defaults to a white-to-black ramp.
        N: number of quantization levels for the colormap.
        methods: captum visualization methods (default: ``['original_image', 'heat_map']``).
        signs: attribution signs to display (default: ``['all', 'positive']``).
        strides: occlusion stride per (channel, height, width).
        sliding_window_shapes: occlusion window per (channel, height, width).
        outlier_perc: percentile of outlying attribution values to clip in the display.
    """
    # Fix: list defaults were mutable default arguments shared across calls.
    if methods is None:
        methods = ['original_image', 'heat_map']
    if signs is None:
        signs = ["all", "positive"]
    dl = self.dls.test_dl(L(inp_data), with_labels=True, bs=1)
    self.dec_img, self.dec_pred = self._formatted_data_iter(dl)
    attributions_occ = self._occlusion.attribute(
        self.dec_img,
        strides=strides,
        target=self.dec_pred,
        sliding_window_shapes=sliding_window_shapes,
        baselines=0)
    self.colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')] if colors is None else colors
    default_cmap = LinearSegmentedColormap.from_list(cmap_name, self.colors, N=N)
    _ = viz.visualize_image_attr_multiple(
        np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        np.transpose(self.dec_img.squeeze().cpu().numpy(), (1, 2, 0)),
        methods, signs,
        cmap=default_cmap,
        show_colorbar=True,
        outlier_perc=outlier_perc,
        titles=[f'Original Image - ({self.dec_pred.cpu().item()})', 'Occlusion'])
def show_attributions(attr, img, predicted_label, true_label, show_map=False, save=None):
    """Plot attribution panels for one example and optionally save the figure.

    ``predicted_label``/``true_label`` are binary (0 = Negative, 1 = Positive).
    When ``save`` is given the figure is written to that path instead of shown.
    """
    # CHW tensors -> HWC numpy arrays for captum's renderer.
    attr = np.transpose(attr.squeeze(0).cpu().detach().numpy(), (1, 2, 0))
    img = np.transpose(img.squeeze(0).cpu().detach().numpy(), (1, 2, 0))
    # Panel layout: a plain heat map is added only when show_map is requested.
    if show_map:
        methods = ["original_image", "heat_map", "blended_heat_map"]
        signs = ["all", "positive", "positive"]
    else:
        methods = ["original_image", "blended_heat_map"]
        signs = ["all", "positive"]
    # Suppress interactive display when saving to disk.
    fig, axis = viz.visualize_image_attr_multiple(
        attr,
        img,
        methods,
        signs,
        cmap=cm.seismic,
        show_colorbar=True,
        outlier_perc=1,
        use_pyplot=(save is None))
    predicted_text = 'Negative' if predicted_label == 0 else 'Positive'
    true_text = 'Negative' if true_label == 0 else 'Positive'
    fig.suptitle('True Label: {} Predicted: {}'.format(true_text, predicted_text), fontsize=20)
    if save is not None:
        fig.savefig(save)
def _visualize(self, inp_data, n_steps=200, cmap_name='custom blue', colors=None, N=256, methods=None, signs=None, outlier_perc=1, baseline_type='zeros'):
    """Attribute one decoded item with Integrated Gradients against a chosen baseline and render it.

    Args:
        inp_data: (input, label) pair in the dataloader's pre-batch form.
        n_steps: number of integration steps for Integrated Gradients.
        cmap_name: name for the matplotlib colormap built from ``colors``.
        colors: colormap anchor list; defaults to a white-to-black ramp.
        N: number of quantization levels for the colormap.
        methods: captum visualization methods (default: ``['original_image', 'heat_map']``).
        signs: attribution signs to display (default: ``['all', 'positive']``).
        outlier_perc: percentile of outlying attribution values to clip in the display.
        baseline_type: baseline selector forwarded to ``self.get_baseline_img``.
    """
    # Fix: list defaults were mutable default arguments shared across calls.
    if methods is None:
        methods = ['original_image', 'heat_map']
    if signs is None:
        signs = ["all", "positive"]
    # Lazily build and cache the IG explainer on first use.
    self._integrated_gradients = self._integrated_gradients if hasattr(self, '_integrated_gradients') else IntegratedGradients(self.model)
    dl = self.dls
    dec_data = dl.after_item(inp_data)
    dec_pred = inp_data[1]
    dec_img = dec_data[0]
    enc_inp, enc_preds = dl.after_batch(to_device(dl.before_batch(dec_data), dl.device))
    baseline = self.get_baseline_img(enc_inp, baseline_type).to(dl.device)
    colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')] if colors is None else colors
    # Bug fix: honor the caller-supplied n_steps (was hard-coded to 200).
    attributions_ig = self._integrated_gradients.attribute(
        enc_inp, baseline, target=enc_preds, n_steps=n_steps)
    default_cmap = LinearSegmentedColormap.from_list(cmap_name, colors, N=N)
    _ = viz.visualize_image_attr_multiple(
        np.transpose(attributions_ig.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        np.transpose(dec_img.numpy(), (1, 2, 0)),
        methods=methods,
        cmap=default_cmap,
        show_colorbar=True,
        signs=signs,
        outlier_perc=outlier_perc,
        titles=[f'Original Image - ({dec_pred})', 'IG'])
def measure_filter_model(
    model_version,
    dataset,
    out_folder,
    weights_dir,
    device,
    method=METHODS["gradcam"],
    sample_images=50,
    step=1,
    use_infidelity=False,
    use_sensitivity=False,
    render=False,
    ids=None,
):
    """Measure attribution stability under image filters.

    Runs the chosen attribution method over the filtered test split, compares
    each filtered image's attribution map to the unfiltered ("none") one via
    SSIM, optionally computes infidelity/sensitivity and renders figures, and
    writes one CSV row of (ssim, score) pairs per image group.
    NOTE(review): ``sample_images`` is accepted but never used below — confirm
    whether sub-sampling was intended.
    """
    invTrans = get_inverse_normalization_transformation()
    data_dir = os.path.join("data")
    # Build the requested backbone; anything unrecognized falls back to EfficientNet-B0.
    if model_version == "resnet18":
        model = create_resnet18_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "resnet50":
        model = create_resnet50_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "densenet":
        model = create_densenet121_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    else:
        model = create_efficientnetb0_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    model.load_state_dict(torch.load(weights_dir))
    # print(model)
    model.eval()
    model.to(device)
    # add_filters=True makes the dataset yield each image once per filter.
    test_dataset = CustomDataset(
        dataset=dataset,
        transformer=get_default_transformation(),
        data_type="test",
        root_dir=data_dir,
        step=step,
        add_filters=True,
        ids=ids,
    )
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4)
    try:
        # Samples the whole dataset (a permutation of all indices).
        image_ids = random.sample(range(0, test_dataset.__len__()),
                                  test_dataset.__len__())
    except ValueError:
        # NOTE(review): message reports the dataset size twice; the requested
        # sample count was probably intended for the first placeholder.
        raise ValueError(
            f"Image sample number ({test_dataset.__len__()}) exceeded dataset size ({test_dataset.__len__()})."
        )
    classes_map = test_dataset.classes_map
    print(f"Measuring {model_version} on {dataset} dataset, with {method}")
    print("-" * 10)
    pbar = tqdm(total=test_dataset.__len__(), desc="Model test completion")
    multipy_by_inputs = False
    # Pick the attribution algorithm plus per-method NoiseTunnel/sensitivity budgets.
    if method == METHODS["ig"]:
        attr_method = IntegratedGradients(model)
        nt_samples = 1
        n_perturb_samples = 1
    if method == METHODS["saliency"]:
        attr_method = Saliency(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gradcam"]:
        # GuidedGradCam needs a concrete conv layer, which differs per backbone.
        if model_version == "efficientnet":
            attr_method = GuidedGradCam(model, model._conv_stem)
        elif model_version == "densenet":
            attr_method = GuidedGradCam(model, model.features.conv0)
        else:
            attr_method = GuidedGradCam(model, model.conv1)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["deconv"]:
        attr_method = Deconvolution(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gradshap"]:
        attr_method = GradientShap(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == METHODS["gbp"]:
        attr_method = GuidedBackprop(model)
        nt_samples = 8
        n_perturb_samples = 2
    if method == "lime":
        attr_method = Lime(model)
        nt_samples = 8
        n_perturb_samples = 2
        # NOTE(review): ``lime_mask`` is not defined in this function — confirm
        # it is a module-level global.
        feature_mask = torch.tensor(lime_mask).to(device)
        multipy_by_inputs = True
    # IG is used directly; every other method is smoothed with NoiseTunnel.
    if method == METHODS["ig"]:
        nt = attr_method
    else:
        nt = NoiseTunnel(attr_method)
    scores = []

    @infidelity_perturb_func_decorator(multipy_by_inputs=multipy_by_inputs)
    def perturb_fn(inputs):
        # Small Gaussian perturbation for the infidelity metric.
        noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
        noise = noise.to(device)
        return inputs - noise

    # The dataset yields images in fixed filter order; "none" must come first
    # so predicted_main_class is taken from the unfiltered image.
    OUR_FILTERS = [
        "none",
        "fx_freaky_details 2,10,1,11,0,32,0",
        "normalize_local 8,10",
        "fx_boost_chroma 90,0,0",
        "fx_mighty_details 25,1,25,1,11,0",
        "sharpen 300",
    ]
    idx = 0
    filter_count = 0
    filter_attrs = {filter_name: [] for filter_name in OUR_FILTERS}
    predicted_main_class = 0
    for input, label in data_loader:
        pbar.update(1)
        inv_input = invTrans(input)
        input = input.to(device)
        input.requires_grad = True
        output = model(input)
        output = F.softmax(output, dim=1)
        prediction_score, pred_label_idx = torch.topk(output, 1)
        prediction_score = prediction_score.cpu().detach().numpy()[0][0]
        pred_label_idx.squeeze_()
        # Anchor the target class on the unfiltered image's prediction.
        if OUR_FILTERS[filter_count] == 'none':
            predicted_main_class = pred_label_idx.item()
        if method == METHODS["gradshap"]:
            baseline = torch.randn(input.shape)
            baseline = baseline.to(device)
        if method == "lime":
            # NOTE(review): lime targets class 1 here, not predicted_main_class
            # like every other branch — confirm this is intentional.
            attributions = attr_method.attribute(input, target=1, n_samples=50)
        elif method == METHODS["ig"]:
            attributions = nt.attribute(
                input,
                target=predicted_main_class,
                n_steps=25,
            )
        elif method == METHODS["gradshap"]:
            attributions = nt.attribute(input,
                                        target=predicted_main_class,
                                        baselines=baseline)
        else:
            attributions = nt.attribute(
                input,
                nt_type="smoothgrad",
                nt_samples=nt_samples,
                target=predicted_main_class,
            )
        if use_infidelity:
            infid = infidelity(model,
                               perturb_fn,
                               input,
                               attributions,
                               target=predicted_main_class)
            inf_value = infid.cpu().detach().numpy()[0]
        else:
            inf_value = 0
        if use_sensitivity:
            # Each method needs its own kwargs forwarded to attribute().
            if method == "lime":
                sens = sensitivity_max(
                    attr_method.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=1,
                    n_samples=200,
                    feature_mask=feature_mask,
                )
            elif method == METHODS["ig"]:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                    n_steps=25,
                )
            elif method == METHODS["gradshap"]:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                    baselines=baseline,
                )
            else:
                sens = sensitivity_max(
                    nt.attribute,
                    input,
                    target=predicted_main_class,
                    n_perturb_samples=n_perturb_samples,
                )
            sens_value = sens.cpu().detach().numpy()[0]
        else:
            sens_value = 0
        # filter_name = test_dataset.data.iloc[pbar.n]["filter"].split(" ")[0]
        attr_data = attributions.squeeze().cpu().detach().numpy()
        if render:
            # NOTE(review): ``default_cmap`` is not defined in this function —
            # confirm it is a module-level global.
            fig, ax = viz.visualize_image_attr_multiple(
                np.transpose(attr_data, (1, 2, 0)),
                np.transpose(inv_input.squeeze().cpu().detach().numpy(),
                             (1, 2, 0)),
                ["original_image", "heat_map"],
                ["all", "positive"],
                titles=["original_image", "heat_map"],
                cmap=default_cmap,
                show_colorbar=True,
                use_pyplot=False,
                fig_size=(8, 6),
            )
            if use_sensitivity or use_infidelity:
                ax[0].set_xlabel(
                    f"Infidelity: {'{0:.6f}'.format(inf_value)}\n Sensitivity: {'{0:.6f}'.format(sens_value)}"
                )
            fig.suptitle(
                f"True: {classes_map[str(label.numpy()[0])][0]}, Pred: {classes_map[str(pred_label_idx.item())][0]}\nScore: {'{0:.4f}'.format(prediction_score)}",
                fontsize=16,
            )
            fig.savefig(
                os.path.join(
                    out_folder,
                    f"{str(idx)}-{str(filter_count)}-{str(label.numpy()[0])}-{str(OUR_FILTERS[filter_count])}-{classes_map[str(label.numpy()[0])][0]}-{classes_map[str(pred_label_idx.item())][0]}.png",
                ))
            plt.close(fig)
        # if pbar.n > 25:
        #     break
        score_for_true_label = output.cpu().detach().numpy(
        )[0][predicted_main_class]
        filter_attrs[OUR_FILTERS[filter_count]] = [
            np.moveaxis(attr_data, 0, -1),
            "{0:.8f}".format(score_for_true_label),
        ]
        data_range_for_current_set = MAX_ATT_VALUES[model_version][method][
            dataset]
        filter_count += 1
        # Once every filter variant of the current image was seen, compare each
        # variant's attribution map against the unfiltered one with SSIM.
        if filter_count >= len(OUR_FILTERS):
            ssims = []
            for rot in OUR_FILTERS:
                ssims.append("{0:.8f}".format(
                    ssim(
                        filter_attrs["none"][0],
                        filter_attrs[rot][0],
                        win_size=11,
                        data_range=data_range_for_current_set,
                        multichannel=True,
                    )))
                ssims.append(filter_attrs[rot][1])
            scores.append(ssims)
            filter_count = 0
            predicted_main_class = 0
            idx += 1
    pbar.close()
    # CSV header: one ssim + one score column per filter.
    indexes = []
    for filter_name in OUR_FILTERS:
        indexes.append(str(filter_name) + "-ssim")
        indexes.append(str(filter_name) + "-score")
    np.savetxt(
        os.path.join(
            out_folder,
            f"{model_version}-{dataset}-{method}-ssim-with-range.csv"),
        np.array(scores),
        delimiter=";",
        fmt="%s",
        header=";".join([str(rot) for rot in indexes]),
    )
    print(f"Artifacts stored at {out_folder}")
# Iterate the test split, attribute each prediction with NoiseTunnel over
# IntegratedGradients, and write one PDF per batch.
# NOTE(review): ``dataloaders``, ``model_ft``, ``class_names``, ``baseline``,
# ``default_cmap`` and ``inputdir`` are defined elsewhere in this file.
for batch_idx, (input, target) in enumerate(dataloaders['test']):
    # Undo the ImageNet normalization so the original image displays correctly.
    image_input = input.cpu().data[0].numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image_input = std * image_input + mean
    image_input = np.clip(image_input, 0, 1)
    output = model_ft(input)
    prediction_score, pred_label_idx = torch.topk(output, 1)
    pred_clname = class_names[pred_label_idx]
    integrated_gradients = IntegratedGradients(model_ft)
    noise_tunnel = NoiseTunnel(integrated_gradients)
    attributions_ig_nt = noise_tunnel.attribute(input,
                                                n_samples=10,
                                                nt_type='smoothgrad_sq',
                                                target=pred_label_idx,
                                                baselines=baseline)
    pdf = PdfPages(inputdir + "res/image_intepret_" + str(batch_idx) + ".pdf")
    fig, axis = viz.visualize_image_attr_multiple(
        np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        image_input,
        ["original_image", "heat_map"],
        ["all", "positive"],
        cmap=default_cmap,
        show_colorbar=True,
        titles=[
            'Truth: ' + class_names[target] + ' Prediction: ' + pred_clname,
            'NoiseTunnel'
        ])
    pdf.savefig(fig)
    plt.close('all')
    pdf.close()
# Fragment of a heat-map dispatcher: compute DeepLiftShap attributions when
# requested, then render any gradient-based method's result to disk.
# NOTE(review): ``deepLiftShap``, ``tensor_x``, ``baselines``, ``class_index``,
# ``attribution``, ``default_cmap``, ``file_dest_heatmap`` and ``file_image``
# come from enclosing code outside this chunk.
if heatmap_type == 'DeepLiftShap':
    attribution = deepLiftShap.attribute(tensor_x,
                                         baselines=baselines,
                                         target=class_index)
if heatmap_type in [
        'GuidedBackprop', 'IntegratedGradients', 'DeepLift', 'DeepLiftShap'
]:
    # Render only the positive heat map (no original-image underlay).
    figure, subplot = viz.visualize_image_attr_multiple(
        attr=np.transpose(
            attribution.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        original_image=None,
        methods=["heat_map"],
        signs=["positive"],
        fig_size=(6, 6),
        use_pyplot=False,
        cmap=default_cmap,
        show_colorbar=False)
    figure.savefig(file_dest_heatmap)
    # Also store the untouched source image as "<name>_process.<ext>".
    file_dest_heatmap = file_dest_heatmap.replace(
        '.jpg', '_process.jpg')
    file_dest_heatmap = file_dest_heatmap.replace(
        '.png', '_process.png')
    shutil.copy(file_image, file_dest_heatmap)
# The LayerGradCam branch continues past this chunk.
if heatmap_type == 'LayerGradCam':
def main(args):
    """Interactively visualize CheXpert attributions for one validation image.

    Loads a saved model and its decision thresholds, prints per-pathology
    predictions for the chosen image, then prompts the user for a pathology
    and an attribution method (GradientShap or DeepLift) to render.
    """
    warnings.filterwarnings("ignore")
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Executing on device:', device)
    # --- Get Threshold ---
    model = './model/base' + str(args.model_index) + '.pkl'
    threshold, _ = get_threshold(model_dir=model,
                                 use_cached=args.use_cached,
                                 search_space=np.linspace(
                                     0, 1, args.search_num))
    # --- Load Model ---
    model = torch.load(model, map_location=device).to(device)
    model = model.eval()
    # --- Fix Seed ---
    torch.manual_seed(123)
    np.random.seed(123)
    # --- Label ---
    # Columns 5+ of the CSV are the pathology labels; target_obsrv picks the
    # five competition pathologies by column index.
    label_dir = '../CheXpert-v1.0-small/valid.csv'
    label = pd.read_csv(label_dir)
    target_label = np.array(list(label.keys())[5:])
    target_obsrv = np.array([8, 2, 6, 5, 10])
    label = label.values
    label_gd = label[:, 5:]
    # --- Image ---
    img_index = args.image_index
    img_dir = '../'
    transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    print('Patient:', label[img_index][0])
    img = Image.open(os.path.join(img_dir,
                                  label[img_index][0])).convert('RGB')
    img = transform(img)
    # NOTE(review): ToTensor already scales to [0, 1]; dividing by 255 again
    # looks suspicious — confirm the model was trained with this scaling.
    img_transformed = img / 255
    img_transformed = img_transformed.unsqueeze(0).to(device)
    # --- Predict ---
    pred = model(img_transformed)
    print()
    print('[Prediction]')
    print('{:17s}|{:4s}|{:4s}|{:4s}|{:3s}'.format('Pathology', 'Prob', 'Thrs',
                                                  'Pred', 'Ans'))
    for lbl, prd, thrsh, gd in zip(target_label[target_obsrv],
                                   pred[0][target_obsrv],
                                   threshold[target_obsrv],
                                   label_gd[img_index][target_obsrv]):
        print('{:17s}:{:4.2f} {:4.2f} {:4d} {:3d}'.format(
            lbl, prd.item(), thrsh, int(prd.item() > thrsh), int(gd)))
    print()
    # Free GPU memory before the attribution pass, which runs on CPU.
    del pred
    torch.cuda.empty_cache()
    gc.collect()
    model = model.to(torch.device('cpu'))
    img_transformed = img_transformed.to(torch.device('cpu'))
    # --- Visualization ---
    pathology_input = input(
        'Please enter which pathology to visualize:\n[0]Atelectasis\n[1]Cardiomegaly\n[2]Consolidation\n[3]Edema\n[4]Pleural Effusion\n[5]Exit\n'
    )
    # Map menu choice to the pathology's column index.
    if pathology_input == '0':
        pathology = 8
        print('Diagnosis on Atelectasis')
    elif pathology_input == '1':
        pathology = 2
        print('Diagnosis on Cardiomegaly')
    elif pathology_input == '2':
        pathology = 6
        print('Diagnosis on Consolidation')
    elif pathology_input == '3':
        pathology = 5
        print('Diagnosis on Edema')
    elif pathology_input == '4':
        pathology = 10
        print('Diagnosis on Pleural Effusion')
    elif pathology_input == '5':
        print('Exiting...')
        return
    else:
        raise NotImplementedError('Only 0-5 are valid input values')
    default_cmap = LinearSegmentedColormap.from_list('custom blue',
                                                     [(0, '#ffffff'),
                                                      (0.25, '#000000'),
                                                      (1, '#000000')],
                                                     N=256)
    print()
    method_input = input(
        'Please enter which method to visualize:\n[0]GradientShap\n[1]DeepLift\n[2]Exit\n'
    )
    if method_input == '0':
        print('Using GradientShap')
        # --- Gradient Shap ---
        gradient_shap = GradientShap(model)
        # === baseline distribution ===
        rand_img_dist = torch.cat([img_transformed * 0, img_transformed * 1])
        attributions_gs = gradient_shap.attribute(img_transformed,
                                                  n_samples=50,
                                                  stdevs=0.0001,
                                                  baselines=rand_img_dist,
                                                  target=pathology)
        _ = viz.visualize_image_attr_multiple(
            np.transpose(attributions_gs.squeeze().cpu().detach().numpy(),
                         (1, 2, 0)),
            np.transpose(img.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            ["original_image", "heat_map"], ["all", "absolute_value"],
            cmap=default_cmap,
            show_colorbar=True)
        del attributions_gs
    elif method_input == '1':
        print('Using DeepLIFT')
        # --- Deep Lift ---
        model = model_transform(model)
        dl = DeepLift(model)
        attr_dl = dl.attribute(img_transformed,
                               target=pathology,
                               baselines=img_transformed * 0)
        _ = viz.visualize_image_attr_multiple(
            np.transpose(attr_dl.squeeze().cpu().detach().numpy(),
                         (1, 2, 0)),
            np.transpose(img.squeeze().cpu().detach().numpy(), (1, 2, 0)),
            ["original_image", "heat_map"], ["all", "positive"],
            cmap=default_cmap,
            show_colorbar=True)
        del attr_dl
    elif method_input == '2':
        print('Exiting...')
        return
    else:
        raise NotImplementedError('Only 0-2 are valid input values')
    # Dead code kept by the original author as a string literal (disabled
    # Integrated Gradients / Noise Tunnel branches) — preserved verbatim.
    """ elif method_input == '2':
        print('Using Integrated Gradients')
        # --- Integrated Gradients ---
        integrated_gradients = IntegratedGradients(model)
        attributions_ig = integrated_gradients.attribute(img_transformed,
                                                         target=pathology,
                                                         n_steps=200)
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions_ig.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(img.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              method=["original_image", "heat_map"],
                                              cmap=default_cmap,
                                              show_colorbar=True,
                                              sign=["all", "positive"])
        del attributions_ig
    elif method_input == '3':
        print('Using Noise Tunnel')
        # --- Noise Tunnel ---
        integrated_gradients = IntegratedGradients(model)
        noise_tunnel = NoiseTunnel(integrated_gradients)
        attributions_ig_nt = noise_tunnel.attribute(img_transformed,
                                                    n_samples=10,
                                                    nt_type='smoothgrad_sq',
                                                    target=pathology)
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(img.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              ["original_image", "heat_map"],
                                              ["all", "positive"],
                                              cmap=default_cmap,
                                              show_colorbar=True)
        del attributions_ig_nt
    """
    gc.collect()
    return
import numpy as np

from captum.attr import visualization as viz

# Convert the computed attribution tensor to an image-like numpy array (HWC).
attribution_dog = np.transpose(attribution_dog.squeeze().cpu().detach().numpy(), (1,2,0))

vis_types = ["heat_map", "original_image"]
vis_signs = ["all", "all"]  # "positive", "negative", or "all" to show everything
# Positive attribution means the presence of that region increases the prediction score.
# Negative attribution marks regions whose presence lowers the prediction score.
_ = viz.visualize_image_attr_multiple(attribution_dog,
                                      center_crop(img),
                                      vis_types,
                                      vis_signs,
                                      ["attribution for dog", "image"],
                                      show_colorbar = True
                                     )

# Same conversion and rendering for the cat-class attribution.
attribution_cat = np.transpose(attribution_cat.squeeze().cpu().detach().numpy(), (1,2,0))

_ = viz.visualize_image_attr_multiple(attribution_cat,
                                      center_crop(img),
                                      ["heat_map", "original_image"],
                                      ["all", "all"],  # positive/negative attribution, or all
                                      ["attribution for cat", "image"],
                                      show_colorbar = True
                                     )
def generate_heatmap(self, img):
    """Generate a GradientShap heat map for a BGR image and return the saved PNG path.

    Args:
        img: BGR image array (OpenCV convention).

    Returns:
        Path of the written heat-map image under ``model_output/``.
    """
    # Load the serialized checkpoint; map to CPU when no GPU is available.
    if self.is_cpu:
        model = torch.load(self.model_path, map_location=torch.device("cpu"))
    else:
        model = torch.load(self.model_path)
    model = model["model"]
    model.eval()
    transform = transforms.Compose(
        [transforms.Resize((64, 64)), transforms.ToTensor()])
    transform_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                               std=[0.229, 0.224, 0.225])
    # OpenCV delivers BGR; PIL expects RGB.
    img_t = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(np.uint8(img_t))
    transformed_img = transform(img)
    # Renamed from ``input`` to avoid shadowing the builtin.
    # NOTE(review): ``device`` is not defined in this method — confirm it is a
    # module-level global.
    model_input = transform_normalize(transformed_img)
    model_input = model_input.unsqueeze(0).to(device)
    output = model(model_input)
    output = F.softmax(output, dim=1)
    # Only the predicted class index is needed; the score was unused.
    _, pred_label_idx = torch.topk(output, 1)
    pred_label_idx.squeeze_()
    default_cmap = LinearSegmentedColormap.from_list("custom blue",
                                                     [(0, "#ffffff"),
                                                      (0.25, "#000000"),
                                                      (1, "#000000")],
                                                     N=256)
    gradient_shap = GradientShap(model)
    # Baseline distribution: the all-zero image and the image itself.
    rand_img_dist = torch.cat([model_input * 0, model_input * 1])
    attributions_gs = gradient_shap.attribute(
        model_input,
        n_samples=50,
        stdevs=0.0001,
        baselines=rand_img_dist,
        target=pred_label_idx,
    )
    out = viz.visualize_image_attr_multiple(
        np.transpose(attributions_gs.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        np.transpose(transformed_img.squeeze().cpu().detach().numpy(),
                     (1, 2, 0)),
        ["original_image", "heat_map"],
        ["all", "absolute_value"],
        cmap=default_cmap,
        show_colorbar=True,
    )
    path = "model_output/" + self.model_path_name + "_heat_map.png"
    # Save the heat-map axes' figure, then reset pyplot state.
    out[1][0].get_figure().savefig(path)
    plt.clf()
    plt.close()
    return path
# NOTE(review): fragment — the leading keyword arguments close a
# visualize_image_attr call whose opening lies before this chunk.
show_colorbar=True, sign='positive', outlier_perc=1)

# Smooth the IG attributions over noisy samples (SmoothGrad-squared).
noise_tunnel = NoiseTunnel(integrated_gradients)
attributions_ig_nt = noise_tunnel.attribute(input,
                                            nt_samples=10,
                                            nt_type='smoothgrad_sq',
                                            target=pred_label_idx,
                                            internal_batch_size=10)
_ = viz.visualize_image_attr_multiple(
    np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(),
                 (1, 2, 0)),
    np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1, 2, 0)),
    ["original_image", "heat_map"],
    ["all", "positive"],
    cmap=default_cmap,
    show_colorbar=True)
plt.savefig(str(i) + ".png")

# Load each non-demented scan, replicate the single channel to 3-channel RGB,
# normalize, and move to the GPU.  The loop body continues past this chunk.
for i in range(50):
    img = Image.open('alzheimers_binary/train/NonDemented/nonDem' + str(i) +
                     '.jpg')
    transformed_img = transform(img)
    transformed_img = torch.cat(
        [transformed_img, transformed_img, transformed_img], dim=0)
    input = transform_normalize(transformed_img)
    input = input.unsqueeze(0)
    input = input.to(gpu)
def get_insights(self, tensor_data, _, target=0):
    """Render IG, Occlusion, and LayerGradCam views of ``tensor_data``.

    Returns a list of three ``{"b64": ...}`` payloads (IG, Occlusion,
    LayerGradCam) holding the base64-encoded PNG bytes of each figure.
    """
    cmap = LinearSegmentedColormap.from_list(
        "custom blue",
        [(0, "#ffffff"), (0.25, "#0000ff"), (1, "#0000ff")],
        N=256,
    )
    # --- Attribution passes -------------------------------------------------
    # Integrated Gradients against an all-zero baseline (delta discarded).
    ig_attr, _ = self.attribute_image_features(
        self.ig,
        tensor_data,
        baselines=tensor_data * 0,
        return_convergence_delta=True,
        n_steps=15,
    )
    # Occlusion with a 15x15 window sliding by 8 pixels.
    occ_attr = self.attribute_image_features(
        self.occlusion,
        tensor_data,
        strides=(3, 8, 8),
        sliding_window_shapes=(3, 15, 15),
        baselines=tensor_data * 0,
    )
    # LayerGradCam, upsampled back to the input's spatial size.
    lgc_attr = self.attribute_image_features(self.layer_gradcam, tensor_data)
    lgc_upsampled = LayerAttribution.interpolate(lgc_attr,
                                                 tensor_data.shape[2:])
    # --- Rendering ----------------------------------------------------------
    original_hwc = np.transpose(
        tensor_data.squeeze().cpu().detach().numpy(), (1, 2, 0))
    ig_fig, _ = viz.visualize_image_attr_multiple(
        np.transpose(ig_attr.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        original_hwc,
        use_pyplot=False,
        methods=["original_image", "heat_map"],
        cmap=cmap,
        show_colorbar=True,
        signs=["all", "positive"],
        titles=["Original", "Integrated Gradients"],
    )
    occ_fig, _ = viz.visualize_image_attr_multiple(
        np.transpose(occ_attr.squeeze().cpu().detach().numpy(), (1, 2, 0)),
        original_hwc,
        [
            "original_image",
            "heat_map",
            "heat_map",
        ],
        ["all", "positive", "negative"],
        show_colorbar=True,
        titles=[
            "Original",
            "Positive Attribution",
            "Negative Attribution",
        ],
        fig_size=(18, 6),
        use_pyplot=False,
    )
    lgc_fig, _ = viz.visualize_image_attr_multiple(
        lgc_upsampled[0].cpu().permute(1, 2, 0).detach().numpy(),
        tensor_data.squeeze().permute(1, 2, 0).cpu().numpy(),
        use_pyplot=False,
        methods=["original_image", "blended_heat_map", "blended_heat_map"],
        signs=["all", "positive", "negative"],
        show_colorbar=True,
        titles=[
            "Original",
            "Positive Attribution",
            "Negative Attribution",
        ],
        fig_size=(18, 6))
    # --- Serialization ------------------------------------------------------
    payloads = [
        self.output_bytes(ig_fig),
        self.output_bytes(occ_fig),
        self.output_bytes(lgc_fig),
    ]
    output = []
    for row in payloads:
        if isinstance(row, (bytes, bytearray)):
            output.append({"b64": b64encode(row).decode("utf8")})
        else:
            output.append(row)
    return output
# most “cat-like”. # occlusion = Occlusion(model) attributions_occ = occlusion.attribute(input_img, target=pred_label_idx, strides=(3, 8, 8), sliding_window_shapes=(3, 15, 15), baselines=0) _ = viz.visualize_image_attr_multiple( np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)), np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1, 2, 0)), ["original_image", "heat_map", "heat_map", "masked_image"], ["all", "positive", "negative", "positive"], show_colorbar=True, titles=[ "Original", "Positive Attribution", "Negative Attribution", "Masked" ], fig_size=(18, 6)) ###################################################################### # Again, we see greater significance placed on the region of the image # that contains the cat. # ######################################################################### # Layer Attribution with Layer GradCAM # ------------------------------------ # # **Layer Attribution** allows you to attribute the activity of hidden
def measure_model(
    model_version,
    dataset,
    out_folder,
    weights_dir,
    device,
    method=METHODS["gradcam"],
    sample_images=50,
    step=1,
):
    """Evaluate an attribution method's infidelity and sensitivity on a model.

    Runs the chosen attribution method over the whole test split, records an
    (infidelity, sensitivity) row per image into a CSV, and renders figures
    for a random sample of ``sample_images`` images.
    """
    invTrans = get_inverse_normalization_transformation()
    data_dir = os.path.join("data")
    # Build the requested backbone; anything unrecognized falls back to EfficientNet-B0.
    if model_version == "resnet18":
        model = create_resnet18_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "resnet50":
        model = create_resnet50_model(num_of_classes=NUM_OF_CLASSES[dataset])
    elif model_version == "densenet":
        model = create_densenet121_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    else:
        model = create_efficientnetb0_model(
            num_of_classes=NUM_OF_CLASSES[dataset])
    model.load_state_dict(torch.load(weights_dir))
    # print(model)
    model.eval()
    model.to(device)
    test_dataset = CustomDataset(
        dataset=dataset,
        transformer=get_default_transformation(),
        data_type="test",
        root_dir=data_dir,
        step=step,
    )
    data_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4)
    try:
        # Pick which image indices will have figures rendered.
        image_ids = random.sample(range(0, test_dataset.__len__()),
                                  sample_images)
    except ValueError:
        raise ValueError(
            f"Image sample number ({sample_images}) exceeded dataset size ({test_dataset.__len__()})."
        )
    classes_map = test_dataset.classes_map
    print(f"Measuring {model_version} on {dataset} dataset, with {method}")
    print("-" * 10)
    pbar = tqdm(total=test_dataset.__len__(), desc="Model test completion")
    multipy_by_inputs = False
    # Pick the attribution algorithm plus per-method NoiseTunnel/sensitivity budgets.
    if method == METHODS["ig"]:
        attr_method = IntegratedGradients(model)
        nt_samples = 8
        n_perturb_samples = 3
    if method == METHODS["saliency"]:
        attr_method = Saliency(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["gradcam"]:
        # GuidedGradCam needs a concrete conv layer, which differs per backbone.
        if model_version == "efficientnet":
            attr_method = GuidedGradCam(model, model._conv_stem)
        elif model_version == "densenet":
            attr_method = GuidedGradCam(model, model.features.conv0)
        else:
            attr_method = GuidedGradCam(model, model.conv1)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["deconv"]:
        attr_method = Deconvolution(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == METHODS["gradshap"]:
        attr_method = GradientShap(model)
        nt_samples = 8
        # Smaller perturbation budgets for the heavier backbones.
        if model_version == "efficientnet":
            n_perturb_samples = 3
        elif model_version == "densenet":
            n_perturb_samples = 2
        else:
            n_perturb_samples = 10
    if method == METHODS["gbp"]:
        attr_method = GuidedBackprop(model)
        nt_samples = 8
        n_perturb_samples = 10
    if method == "lime":
        attr_method = Lime(model)
        nt_samples = 8
        n_perturb_samples = 10
        # NOTE(review): ``lime_mask`` is not defined in this function — confirm
        # it is a module-level global.
        feature_mask = torch.tensor(lime_mask).to(device)
        multipy_by_inputs = True
    # IG is used directly; every other method is smoothed with NoiseTunnel.
    if method == METHODS['ig']:
        nt = attr_method
    else:
        nt = NoiseTunnel(attr_method)
    scores = []

    @infidelity_perturb_func_decorator(multipy_by_inputs=multipy_by_inputs)
    def perturb_fn(inputs):
        # Small Gaussian perturbation for the infidelity metric.
        noise = torch.tensor(np.random.normal(0, 0.003,
                                              inputs.shape)).float()
        noise = noise.to(device)
        return inputs - noise

    for input, label in data_loader:
        pbar.update(1)
        inv_input = invTrans(input)
        input = input.to(device)
        input.requires_grad = True
        output = model(input)
        output = F.softmax(output, dim=1)
        prediction_score, pred_label_idx = torch.topk(output, 1)
        prediction_score = prediction_score.cpu().detach().numpy()[0][0]
        pred_label_idx.squeeze_()
        if method == METHODS['gradshap']:
            baseline = torch.randn(input.shape)
            baseline = baseline.to(device)
        if method == "lime":
            # NOTE(review): lime targets class 1 here, not pred_label_idx like
            # every other branch — confirm this is intentional.
            attributions = attr_method.attribute(input, target=1,
                                                 n_samples=50)
        elif method == METHODS['ig']:
            attributions = nt.attribute(
                input,
                target=pred_label_idx,
                n_steps=25,
            )
        elif method == METHODS['gradshap']:
            attributions = nt.attribute(input,
                                        target=pred_label_idx,
                                        baselines=baseline)
        else:
            attributions = nt.attribute(
                input,
                nt_type="smoothgrad",
                nt_samples=nt_samples,
                target=pred_label_idx,
            )
        infid = infidelity(model,
                           perturb_fn,
                           input,
                           attributions,
                           target=pred_label_idx)
        # Each method needs its own kwargs forwarded to attribute().
        if method == "lime":
            sens = sensitivity_max(
                attr_method.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=1,
                n_samples=200,
                feature_mask=feature_mask,
            )
        elif method == METHODS['ig']:
            sens = sensitivity_max(
                nt.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=n_perturb_samples,
                n_steps=25,
            )
        elif method == METHODS['gradshap']:
            sens = sensitivity_max(nt.attribute,
                                   input,
                                   target=pred_label_idx,
                                   n_perturb_samples=n_perturb_samples,
                                   baselines=baseline)
        else:
            sens = sensitivity_max(
                nt.attribute,
                input,
                target=pred_label_idx,
                n_perturb_samples=n_perturb_samples,
            )
        inf_value = infid.cpu().detach().numpy()[0]
        sens_value = sens.cpu().detach().numpy()[0]
        # Render figures only for the randomly sampled image ids.
        if pbar.n in image_ids:
            attr_data = attributions.squeeze().cpu().detach().numpy()
            # NOTE(review): ``default_cmap`` is not defined in this function —
            # confirm it is a module-level global.
            fig, ax = viz.visualize_image_attr_multiple(
                np.transpose(attr_data, (1, 2, 0)),
                np.transpose(inv_input.squeeze().cpu().detach().numpy(),
                             (1, 2, 0)),
                ["original_image", "heat_map"],
                ["all", "positive"],
                titles=["original_image", "heat_map"],
                cmap=default_cmap,
                show_colorbar=True,
                use_pyplot=False,
                fig_size=(8, 6),
            )
            ax[0].set_xlabel(
                f"Infidelity: {'{0:.6f}'.format(inf_value)}\n Sensitivity: {'{0:.6f}'.format(sens_value)}"
            )
            fig.suptitle(
                f"True: {classes_map[str(label.numpy()[0])][0]}, Pred: {classes_map[str(pred_label_idx.item())][0]}\nScore: {'{0:.4f}'.format(prediction_score)}",
                fontsize=16,
            )
            fig.savefig(
                os.path.join(
                    out_folder,
                    f"{str(pbar.n)}-{classes_map[str(label.numpy()[0])][0]}-{classes_map[str(pred_label_idx.item())][0]}.png",
                ))
            plt.close(fig)
        # if pbar.n > 25:
        #     break
        scores.append([inf_value, sens_value])
    pbar.close()
    np.savetxt(
        os.path.join(out_folder,
                     f"{model_version}-{dataset}-{method}.csv"),
        np.array(scores),
        delimiter=",",
        header="infidelity,sensitivity",
    )
    print(f"Artifacts stored at {out_folder}")