format="png",
               cmap="hot")

    # fig = plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.axis('off')
    # fig.axes.get_xaxis().set_visible(False)
    # fig.axes.get_yaxis().set_visible(False)
    # plt.savefig(img_path, bbox_inches='tight', pad_inches = 0)

    # plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.savefig(img_path)


# --- Evaluation setup: restore model weights from the in-memory `checkpoint` ---
# `CAN`, `checkpoint`, `device`, and `runningScore` are defined elsewhere in
# this file / its imports.
net = CAN()
#net.load_state_dict(torch.load('best_val_tensorboard.wts'))
net.load_state_dict(checkpoint["model_state"])
net.to(device)
net.eval()

# Counter used to name image dumps (images/<i>.png).
# NOTE(review): `i` is never incremented in the visible code, so every batch
# would write to the same file — confirm against the full script.
i = 0

# When True, each batch's input images are saved to disk during evaluation.
save_data = True

# Confusion-matrix accumulator for 2-class (binary) segmentation metrics.
running_metrics_val = runningScore(2)

# Inference pass over the training loader with autograd disabled
# (no gradient graphs are built, saving memory and compute).
with torch.no_grad():
    for data in trainloader:
        imgs, labels = data
        # Move the batch to the same device as the model.
        imgs, labels = imgs.to(device), labels.to(device)
        if save_data:
            # Dump the raw input batch; `save_img` is defined earlier in the file.
            save_img(imgs, 'images/' + str(i) + '.png', True)
               np.transpose(npimg, (1, 2, 0)),
               format="png",
               cmap="hot")

    # fig = plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.axis('off')
    # fig.axes.get_xaxis().set_visible(False)
    # fig.axes.get_yaxis().set_visible(False)
    # plt.savefig(img_path, bbox_inches='tight', pad_inches = 0)

    # plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # plt.savefig(img_path)


# --- Second evaluation setup: restore weights from a saved .wts file ---
net = CAN()
net.load_state_dict(torch.load('best_val_tensorboard.wts'))
net.to(device)
net.eval()

# Counter used to name image dumps (unused while save_data is False).
i = 0

# Image dumping disabled for this pass.
save_data = False

# Confusion-matrix accumulator for 2-class (binary) segmentation metrics.
running_metrics_val = runningScore(2)

# Inference pass with autograd disabled.
with torch.no_grad():
    for data in trainloader:
        imgs, labels = data
        # Move the batch to the same device as the model.
        imgs, labels = imgs.to(device), labels.to(device)
        if save_data:
            # Skipped here: save_data is False in the visible code above.
            save_img(imgs, 'images/' + str(i) + '.png', True)
# The CAN network omits VGG-16's max-pool layers, so the indices of the last
# three conv layers shift down: 24 -> 23, 26 -> 25, 28 -> 27. Rename the
# pretrained keys accordingly (same pop/insert order as writing each pair
# out by hand).
for new_idx, old_idx in ((23, 24), (25, 26), (27, 28)):
    for param_name in ('weight', 'bias'):
        new_key = 'features.{}.{}'.format(new_idx, param_name)
        old_key = 'features.{}.{}'.format(old_idx, param_name)
        pretrained_state_dict[new_key] = pretrained_state_dict.pop(old_key)

# strict=False: the pretrained dict only covers the VGG backbone, not the
# network's remaining layers.
net.load_state_dict(pretrained_state_dict, strict=False)
net.to(device)
net.train()

# Data augmentation: small random rotation (+/- 5 degrees) plus random
# horizontal flips.
augmentations = Compose([RandomRotate(5), RandomHorizontallyFlip()])
# TuSimple lane-detection training split (path is machine-specific).
train_dataset = tusimpleLoader('/home/tejus/Downloads/train_set/',
                               split="train",
                               augmentations=augmentations)
# NOTE(review): num_workers=TRAIN_BATCH ties the DataLoader worker count to
# the batch size — this looks like a copy-paste slip; confirm the intended
# worker count. pin_memory speeds up host-to-GPU transfers.
trainloader = torch.utils.data.DataLoader(train_dataset,
                                          batch_size=TRAIN_BATCH,
                                          shuffle=True,
                                          num_workers=TRAIN_BATCH,
                                          pin_memory=True)

val_dataset = tusimpleLoader('/home/tejus/Downloads/train_set/',
                             split="val",
# loss_fn = nn.BCEWithLogitsLoss()
# Reduce the learning rate (default factor 0.1) when the monitored metric
# stops decreasing for `patience` epochs; never drop below min_lr.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience = 2, verbose = True, min_lr = 0.000001)
# Per-pixel multi-class loss (used here for 2-class lane segmentation).
loss_fn = nn.CrossEntropyLoss()
net.to(device)

if not resume_training:
    # Fresh run: initialise the backbone from ImageNet-pretrained VGG-16.
    pretrained_state_dict = models.vgg16(pretrained=True).state_dict()
    # Discard the fully-connected classifier weights; only conv features are reused.
    pretrained_state_dict = {k:v for k, v in pretrained_state_dict.items() if 'classifier' not in k}
    #rename parameters. ordering changes because maxpool layers aren't present in network.
    pretrained_state_dict['features.23.weight'] = pretrained_state_dict.pop('features.24.weight')
    pretrained_state_dict['features.23.bias'] = pretrained_state_dict.pop('features.24.bias')
    pretrained_state_dict['features.25.weight'] = pretrained_state_dict.pop('features.26.weight')
    pretrained_state_dict['features.25.bias'] = pretrained_state_dict.pop('features.26.bias')
    pretrained_state_dict['features.27.weight'] = pretrained_state_dict.pop('features.28.weight')
    pretrained_state_dict['features.27.bias'] = pretrained_state_dict.pop('features.28.bias')
    # strict=False: the pretrained dict covers only the backbone layers.
    net.load_state_dict(pretrained_state_dict, strict=False)
    start_iter = 0
else:
    # Resume: restore model, optimizer, and scheduler state plus the epoch
    # counter from the checkpoint on disk.
    checkpoint = torch.load(checkpoint_dir)
    net.load_state_dict(checkpoint["model_state"])
    optimizer.load_state_dict(checkpoint["optimizer_state"])
    scheduler.load_state_dict(checkpoint["scheduler_state"])
    start_iter = checkpoint["epoch"]
    logger.info(
        "Loaded checkpoint '{}' (epoch {})".format(
            checkpoint_dir, checkpoint["epoch"]
        )
    )

net.train()