plt.imsave(img_path,
               np.transpose(npimg, (1, 2, 0)),
               format="png",
               cmap="hot")

    # fig = plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.axis('off')
    # fig.axes.get_xaxis().set_visible(False)
    # fig.axes.get_yaxis().set_visible(False)
    # plt.savefig(img_path, bbox_inches='tight', pad_inches = 0)

    # plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.savefig(img_path)


# Build the segmentation network and restore weights from an in-memory
# checkpoint dict (NOTE(review): `checkpoint` must have been loaded earlier
# in the file -- it is not defined in this chunk).
net = CAN()
#net.load_state_dict(torch.load('best_val_tensorboard.wts'))
net.load_state_dict(checkpoint["model_state"])
net.to(device)
net.eval()  # inference mode: disables dropout, uses running batch-norm stats

i = 0  # presumably a batch/sample counter for the loop below -- TODO confirm

save_data = True  # selects the image pre-processing branch in the eval loop

# Running confusion-matrix metrics for 2 classes (binary segmentation).
running_metrics_val = runningScore(2)

# NOTE(review): this whole `with` block looks like a broken paste/merge.
# The loop only moves batches onto the device and discards them, and the
# `imsave` call after it references `img_path` and `npimg`, neither of which
# is defined anywhere above in this chunk -- it would raise NameError.
# TODO: confirm intent against version control and either restore the
# missing body or delete this block.
with torch.no_grad():  # inference only: no gradient tracking
    for data in trainloader:
        imgs, labels = data
        # Move the batch to the active device (GPU when available).
        imgs, labels = imgs.to(device), labels.to(device)
    # Runs once, after the loop; `img_path` / `npimg` are undefined here.
    plt.imsave(img_path,
               np.transpose(npimg, (1, 2, 0)),
               format="png",
               cmap="hot")

    # Alternative save strategies kept for reference (commented out):
    # fig = plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.axis('off')
    # fig.axes.get_xaxis().set_visible(False)
    # fig.axes.get_yaxis().set_visible(False)
    # plt.savefig(img_path, bbox_inches='tight', pad_inches = 0)

    # plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.savefig(img_path)


# Second setup run (NOTE(review): near-duplicate of the block above, but
# restoring from a .wts file on disk instead of the in-memory checkpoint).
net = CAN()
net.load_state_dict(torch.load('best_val_tensorboard.wts'))
net.to(device)
net.eval()  # inference mode: disables dropout, uses running batch-norm stats

i = 0  # presumably a batch/sample counter for the loop below -- TODO confirm

save_data = False  # selects the `else` pre-processing branch in the eval loop

# Running confusion-matrix metrics for 2 classes (binary segmentation).
running_metrics_val = runningScore(2)

# Evaluation pass that renders each batch to an image file.
# FIX(review): the original body had a hard IndentationError -- the
# `if save_data:` branch bodies were dedented to the level of the `if`
# itself and the `else:` sat one level below it, so the script could not
# run past this point. Re-indented here to the apparent intent (the whole
# if/else plus the grid/save steps live inside the batch loop).
# NOTE(review): `img` is still never assigned before use (presumably it
# should come from `imgs` or `labels`), and `img_path` is not defined in
# this chunk -- TODO confirm both against the original version.
with torch.no_grad():  # inference only: no gradient tracking
    for data in trainloader:
        imgs, labels = data
        imgs, labels = imgs.to(device), labels.to(device)
        if save_data:
            # Scale to [0, 1] and swap the first two channels.
            img /= 255
            img = img[:, [1, 0, 2], :, :]
        else:
            # Add a channel dimension and scale up to [0, 255].
            img = img.unsqueeze(1)
            img *= 255
        # Tile the batch into one grid image and dump it via matplotlib.
        img = torchvision.utils.make_grid(img)
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.savefig(img_path)


# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Fixed seed for reproducible initialisation / shuffling.
torch.manual_seed(102)

# Instantiate the network; its frontend mirrors VGG16's convolutional stack.
net = CAN()

# Grab ImageNet-pretrained VGG16 weights and discard the classifier head --
# only the convolutional 'features.*' parameters are transferable.
pretrained_state_dict = models.vgg16(pretrained=True).state_dict()
pretrained_state_dict = {
    key: value
    for key, value in pretrained_state_dict.items() if 'classifier' not in key
}
# Re-key the last two conv layers: this network has no max-pool modules, so
# the layer indices shift down relative to torchvision's VGG16 numbering.
for old_idx, new_idx in (('24', '23'), ('26', '25')):
    for param_kind in ('weight', 'bias'):
        old_key = 'features.' + old_idx + '.' + param_kind
        new_key = 'features.' + new_idx + '.' + param_kind
        pretrained_state_dict[new_key] = pretrained_state_dict.pop(old_key)
# Training-run bookkeeping: resume flag, checkpoint location, and a
# timestamped TensorBoard / log directory.
resume_training = True
# NOTE(review): machine-specific absolute path -- consider making this configurable.
checkpoint_dir = '/home/tejus/lane-seg-experiments/Segmentation/CAN_logger/frontend_only/runs/2018-10-06_14-51-26/best_val_model_tested.pkl'
run_id = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
# os.path.join supplies the separator itself, and strftime already returns
# a str -- the former trailing '/' and str() call were redundant.
logdir = os.path.join('runs', run_id)
writer = SummaryWriter(log_dir=logdir)
print('RUNDIR: {}'.format(logdir))
logger = get_logger(logdir)
logger.info('Let the party begin')

# Device / seed setup. NOTE(review): duplicates the identical statements that
# appear earlier in the file -- probably a merge artefact.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

torch.manual_seed(102)

# Network definition 

net = CAN()
# Adam over trainable parameters only (frozen parameters are filtered out).
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=0.0001)
# optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr = 0.0001, momentum=0.9) # 0.00001
# loss_fn = nn.BCEWithLogitsLoss()
# Decay the LR (default factor 0.1) when the monitored quantity stops
# decreasing for 2 epochs; never go below 1e-6.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience = 2, verbose = True, min_lr = 0.000001)
loss_fn = nn.CrossEntropyLoss()  # per-pixel multi-class (here 2-class) loss
net.to(device)

if not resume_training:
    pretrained_state_dict = models.vgg16(pretrained=True).state_dict()
    pretrained_state_dict = {k:v for k, v in pretrained_state_dict.items() if 'classifier' not in k}
    #rename parameters. ordering changes because maxpool layers aren't present in network.
    pretrained_state_dict['features.23.weight'] = pretrained_state_dict.pop('features.24.weight')
    pretrained_state_dict['features.23.bias'] = pretrained_state_dict.pop('features.24.bias')
    pretrained_state_dict['features.25.weight'] = pretrained_state_dict.pop('features.26.weight')
    pretrained_state_dict['features.25.bias'] = pretrained_state_dict.pop('features.26.bias')