import torch.nn as nn
import torch.utils.data.distributed
from torchvision.models.densenet import densenet201
from torchvision import transforms
from torchvision import datasets
import json

from imagemove2 import getlookup

a, b, c = getlookup()
d = [a[x] for x in a]
labels_1_ptype = {
    0: 8,
    1: 2,
    2: 0,
    3: 7,
    4: 9,
    5: 1,
    6: 4,
    7: 3,
    8: 10,
    9: 5,
    10: 6,
}
startidx = {
    'Apple': 0,
    'Cedar': 4,
    'Cherry': 6,
    'Corn': 9,
    'Grape': 17,
    'Citrus': 24,
    'Peach': 27,
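

# -----------------------------------------------------------------------------
# Illustration only (not part of the original file): the `startidx` table above
# reads like the first global class index of each plant type, which suggests a
# per-plant-type ("local") prediction is mapped back to a global label by
# adding that type's offset. A minimal sketch of that assumed convention, with
# `start_offsets`, `plant` and `local_idx` as hypothetical inputs:
# -----------------------------------------------------------------------------
def to_global_label(start_offsets, plant, local_idx):
    """Sketch of the assumed mapping: global label = per-type offset + local index."""
    return start_offsets[plant] + local_idx

# e.g. to_global_label(startidx, 'Cherry', 2) == 8 under this assumption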
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                p.data.add_(-group['lr'], d_p)

        return loss


if __name__ == '__main__':
    lookup = getlookup()
    args = DotDict({
        'batch_size': 32,
        'batch_mul': 4,
        'val_batch_size': 10,
        'cuda': True,
        'model': '',
        'train_plot': False,
        'epochs': [60],
        'try_no': '1_densecedar',
        'imsize': [224],
        'imsize_l': [256],
        # 'traindir': '/root/palm/DATA/plant/typesep_train/Cedar',
        'valdir': '/media/palm/Unimportant/pdr2018/typesep_train/Cedar',
        'workers': 4,
        'resume': False,
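

# -----------------------------------------------------------------------------
# `DotDict` is not defined in this excerpt. A minimal sketch, assuming it is
# simply a dict whose keys are also exposed as attributes, so that both
# `args['batch_size']` and `args.batch_size` work; the project's real class
# may differ.
# -----------------------------------------------------------------------------
class DotDict(dict):
    """Sketch: dict with attribute-style access to its keys."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value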
def main():
    root_path = '/media/palm/Unimportant/pdr2018/typesep_validate/Tomato/'
    image_name = 'c9ebc74c2177ce60a8230855333fb9e7.jpg'
    folder_name = '14_Tomato_Spider_Mite_Damage_Serious'
    # image_path = root_path+'/14_Tomato_Spider_Mite_Damage_Serious/1c0f1ae1374d01c2933069232735a331.jpg'
    image_path = os.path.join(root_path, folder_name, image_name)
    topk = 1
    cuda = True
    arch = 'densenet201'
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)

    a, b, c = getlookup()

    device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        current_device = torch.cuda.current_device()
        print('Running on the GPU:', torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Class names for the Tomato sub-model
    classes = c['Tomato']

    # Model
    model = getmodel(20)
    checkpoint = torch.load('checkpoint/try_4_densesep-Tomatotemp.t7')
    model.load_state_dict(checkpoint['net'])
    model.to(device)
    model.eval()

    # Image: BGR -> RGB, resize to the network input size, then normalize
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'],) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam('results/{}_{}_gcam_{}.png'.format(image_name, classes[idx[i]], arch),
                     output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient('results/{}_{}_bp_{}.png'.format(image_name, classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    deconv = Deconvolution(model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient('results/{}_{}_deconv_{}.png'.format(image_name, classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        # Guided Grad-CAM = upsampled Grad-CAM region * guided backprop gradients
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient('results/{}_{}_gbp_{}.png'.format(image_name, classes[idx[i]], arch),
                      feature)
        save_gradient('results/{}_{}_ggcam_{}.png'.format(image_name, classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
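

# -----------------------------------------------------------------------------
# `getmodel` is referenced above but not shown in this excerpt. A minimal
# sketch of what the demo appears to assume it returns: a densenet201 whose
# classifier is replaced by a `num_classes`-way linear head (20 Tomato classes
# here), matching the checkpoint's state_dict. The project's real helper may
# differ (e.g. pretrained weights or extra layers).
# -----------------------------------------------------------------------------
import torch.nn as nn
from torchvision.models import densenet201


def getmodel(num_classes):
    """Sketch only: densenet201 with its classifier swapped for `num_classes` outputs."""
    net = densenet201()
    net.classifier = nn.Linear(net.classifier.in_features, num_classes)
    return net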