def demo4(image_paths, arch, topk, stride, n_batches, output_dir, cuda):
    """
    Generate occlusion sensitivity maps for the top-k predicted classes.

    Args:
        image_paths: iterable of image file paths to analyze.
        arch: torchvision model name (key into ``models.__dict__``).
        topk: number of top-ranked predicted classes to map per image.
        stride: stride of the sliding occlusion patch.
        n_batches: batch count forwarded to ``occlusion_sensitivity``.
        output_dir: directory where result PNGs are written.
        cuda: flag forwarded to ``get_device`` to pick GPU/CPU.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model = torch.nn.DataParallel(model)
    model.to(device)
    model.eval()

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    print("Occlusion Sensitivity:")

    # Fix: typo "patche_sizes" -> "patch_sizes"
    patch_sizes = [10, 15, 25, 35, 45, 90]

    # Fix: rank the classes under no_grad — this is inference only, so
    # building an autograd graph here wasted memory for no benefit.
    with torch.no_grad():
        logits = model(images)
    probs = F.softmax(logits, dim=1)
    probs, ids = probs.sort(dim=1, descending=True)

    for i in range(topk):
        for p in patch_sizes:
            print("Patch:", p)
            sensitivity = occlusion_sensitivity(
                model, images, ids[:, [i]], patch=p, stride=stride, n_batches=n_batches
            )

            # Save results as image files
            for j in range(len(images)):
                print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
                save_sensitivity(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-sensitivity-{}-{}.png".format(
                            j, arch, p, classes[ids[j, i]]
                        ),
                    ),
                    maps=sensitivity[j],
                )
def demo4(image_paths, arch, topk, stride, n_batches, output_dir, cuda):
    """
    Generate occlusion sensitivity maps for the top-k predicted classes.

    Args:
        image_paths: iterable of image file paths to analyze.
        arch: torchvision model name (key into ``models.__dict__``).
        topk: number of top-ranked predicted classes to map per image.
        stride: stride of the sliding occlusion patch.
        n_batches: batch count forwarded to ``occlusion_sensitivity``.
        output_dir: directory where result PNGs are written.
        cuda: flag forwarded to ``get_device`` to pick GPU/CPU.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model = torch.nn.DataParallel(model)
    model.to(device)
    model.eval()

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    print("Occlusion Sensitivity:")

    # Fix: typo "patche_sizes" -> "patch_sizes"
    patch_sizes = [10, 15, 25, 35, 45, 90]

    # Fix: class ranking is pure inference — run it without autograd to
    # avoid building an unused computation graph.
    with torch.no_grad():
        logits = model(images)
    probs = F.softmax(logits, dim=1)
    probs, ids = probs.sort(dim=1, descending=True)

    for i in range(topk):
        for p in patch_sizes:
            print("Patch:", p)
            sensitivity = occlusion_sensitivity(
                model, images, ids[:, [i]], patch=p, stride=stride, n_batches=n_batches
            )

            # Save results as image files
            for j in range(len(images)):
                print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
                save_sensitivity(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-sensitivity-{}-{}.png".format(
                            j, arch, p, classes[ids[j, i]]
                        ),
                    ),
                    maps=sensitivity[j],
                )
def demo4(image_paths, arch, topk, stride, n_batches, output_dir, cuda):
    """
    Generate occlusion sensitivity maps using a fine-tuned ResNet
    checkpoint (9 classes) loaded from Google Drive.

    Args:
        image_paths: iterable of image file paths to analyze.
        arch: only used to tag output filenames — the model itself is
            the fixed fine-tuned ResNet checkpoint below.
        topk: number of top-ranked predicted classes to map per image.
        stride: stride of the sliding occlusion patch.
        n_batches: batch count forwarded to ``occlusion_sensitivity``.
        output_dir: directory where result PNGs are written.
        cuda: flag forwarded to ``get_device`` to pick GPU/CPU.
    """
    # check if CUDA is available
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU ...')
    else:
        print('CUDA is available! Training on GPU ...')

    # Fix: device/classes setup was duplicated (executed twice) in the
    # original — kept a single copy.
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Fine-tuned checkpoint and architecture configuration.
    PRE_MODEL_DIR = '/content/gdrive/My Drive/UnB/TCC-1/TCC1-1-dataset-final/restnet_model152_trained_exp7.pt'
    model_name = 'resnet'
    num_classes = 9
    feature_extract = False
    model, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)

    # Map the checkpoint onto CPU when no GPU is available.
    if train_on_gpu:
        state = torch.load(PRE_MODEL_DIR)
    else:
        state = torch.load(PRE_MODEL_DIR, map_location='cpu')

    # Loading weights in restnet architecture
    model.load_state_dict(state['state_dict'])
    model = torch.nn.DataParallel(model)
    model.to(device)
    model.eval()

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    print("Occlusion Sensitivity:")

    # Fix: typo "patche_sizes" -> "patch_sizes"
    patch_sizes = [10, 15, 25, 35, 45, 90]

    # Fix: inference-only forward pass — disable autograd.
    with torch.no_grad():
        logits = model(images)
    probs = F.softmax(logits, dim=1)
    probs, ids = probs.sort(dim=1, descending=True)

    for i in range(topk):
        for p in patch_sizes:
            print("Patch:", p)
            sensitivity = occlusion_sensitivity(
                model, images, ids[:, [i]], patch=p, stride=stride, n_batches=n_batches
            )

            # Save results as image files
            for j in range(len(images)):
                print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
                save_sensitivity(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-sensitivity-{}-{}.png".format(
                            j, arch, p, classes[ids[j, i]]
                        ),
                    ),
                    maps=sensitivity[j],
                )