Example #1
import os

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms

import model   # project-local module providing the network definitions (NLayerDiscriminator)
import utils   # project-local helpers providing get_testloader


def create_cam(config):
    if not os.path.exists(config.result_path):
        os.mkdir(config.result_path)

    test_loader, num_class = utils.get_testloader(config.dataset,
                                                  config.dataset_path,
                                                  config.img_size)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    mdl = model.NLayerDiscriminator()
    #mdl = nn.DataParallel(mdl)
    cnn = mdl.to(device)
    state_dict = torch.load(os.path.join(config.model_path, config.model_name))
    cnn.load_state_dict(state_dict)
    finalconv_name = 'conv'

    # hook to capture the feature maps of the final conv layer
    feature_blobs = []

    def hook_feature(module, input, output):
        feature_blobs.append(output.cpu().data.numpy())

    cnn._modules.get(finalconv_name).register_forward_hook(hook_feature)
    params = list(cnn.parameters())
    # weight matrix of the final linear layer (params[-1] is its bias)
    weight_softmax = np.squeeze(params[-2].cpu().data.numpy())

    # weighted sum of the final conv feature maps for the given class,
    # normalized to 0-255 and upsampled to the input resolution
    def returnCAM(feature_conv, weight_softmax, class_idx):
        size_upsample = (config.img_size, config.img_size)
        _, nc, h, w = feature_conv.shape
        output_cam = []
        cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h * w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        output_cam.append(cv2.resize(cam_img, size_upsample))
        return output_cam

    for i, (image_tensor, label) in enumerate(test_loader):
        image_PIL = transforms.ToPILImage()(image_tensor[0])
        image_PIL.save(os.path.join(config.result_path, 'img%d.png' % (i + 1)))

        image_tensor = image_tensor.to(device)
        logit, _ = cnn(image_tensor)
        h_x = F.softmax(logit, dim=1).data.squeeze()
        probs, idx = h_x.sort(0, True)
        print("True label : %d, Predicted label : %d, Probability : %.2f" %
              (label.item(), idx[0].item(), probs[0].item()))
        CAMs = returnCAM(feature_blobs[0], weight_softmax, [idx[0].item()])
        img = cv2.imread(
            os.path.join(config.result_path, 'img%d.png' % (i + 1)))
        height, width, _ = img.shape
        heatmap = cv2.applyColorMap(cv2.resize(CAMs[0], (width, height)),
                                    cv2.COLORMAP_JET)
        result = heatmap * 0.3 + img * 0.5
        cv2.imwrite(os.path.join(config.result_path, 'cam%d.png' % (i + 1)),
                    result)
        if i + 1 == config.num_result:
            break
        feature_blobs.clear()
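
Below is a minimal invocation sketch; the field names mirror the attributes create_cam() reads, while the concrete values (dataset name, paths, image size) are placeholders rather than the original script's defaults.

# Sketch only: values are placeholders; field names match what create_cam() reads.
from types import SimpleNamespace

config = SimpleNamespace(
    dataset='mnist',               # any dataset name utils.get_testloader accepts
    dataset_path='./data',
    img_size=64,
    model_path='./checkpoints',
    model_name='discriminator.pth',
    result_path='./results',
    num_result=5,                  # number of CAM images to write before stopping
)
create_cam(config)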
Example #2
import os

import torch

import model  # project-local module providing GlobalGenerator and NLayerDiscriminator


def load_models(directory, batch_num):
    generator = model.GlobalGenerator()
    discriminator = model.NLayerDiscriminator(input_nc=3)
    gen_name = os.path.join(directory, '%05d_generator.pth' % batch_num)
    dis_name = os.path.join(directory, '%05d_discriminator.pth' % batch_num)

    if os.path.isfile(gen_name) and os.path.isfile(dis_name):
        gen_dict = torch.load(gen_name)
        dis_dict = torch.load(dis_name)
        generator.load_state_dict(gen_dict)
        discriminator.load_state_dict(dis_dict)
        print('Models loaded, resume training from batch %05d...' % batch_num)
    else:
        print('Cannot find saved models, start training from scratch...')
        batch_num = 0

    return generator, discriminator, batch_num
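
A hedged usage sketch follows; the checkpoint directory and batch number are placeholders.

# Sketch only: directory and batch number are placeholders.
generator, discriminator, start_batch = load_models('./checkpoints', 20000)
# ...training then resumes from batch `start_batch` (0 if no checkpoint was found).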
Example #3
def load_models(directory, batch_num):
    # 20180924: smaller network.
    generator = model.GlobalGenerator(n_downsampling=2, n_blocks=6)
    discriminator = model.NLayerDiscriminator(input_nc=3, n_layers=3)  # 48 input
    gen_name = os.path.join(directory, '%05d_generator.pth' % batch_num)
    dis_name = os.path.join(directory, '%05d_discriminator.pth' % batch_num)

    if os.path.isfile(gen_name) and os.path.isfile(dis_name):
        gen_dict = torch.load(gen_name)
        dis_dict = torch.load(dis_name)
        generator.load_state_dict(gen_dict)
        discriminator.load_state_dict(dis_dict)
        print('Models loaded, resume training from batch %05d...' % batch_num)
    else:
        print('Cannot find saved models, start training from scratch...')
        batch_num = 0

    return generator, discriminator, batch_num
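
The matching save side is not part of the snippet; a hypothetical helper that writes checkpoints with the naming convention load_models() expects could look like this.

# Sketch only: save_models() is a hypothetical counterpart, not from the source repo.
def save_models(directory, batch_num, generator, discriminator):
    os.makedirs(directory, exist_ok=True)
    torch.save(generator.state_dict(),
               os.path.join(directory, '%05d_generator.pth' % batch_num))
    torch.save(discriminator.state_dict(),
               os.path.join(directory, '%05d_discriminator.pth' % batch_num))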
Example #4
import torch
from pytorch_wavelets import DWTForward

import loss   # project-local loss definitions (provides GeneratorLoss)
import model  # project-local network definitions

# `opt` holds the command-line options for this script (parsed earlier, not shown).

torch.manual_seed(0)
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# prepare neural networks
model_g = model.Generator(n_res_blocks=opt.num_res_blocks)
print('# generator parameters:',
      sum(param.numel() for param in model_g.parameters()))
model_d = model.Discriminator(kernel_size=opt.kernel_size,
                              gaussian=opt.gaussian,
                              wgan=opt.wgan,
                              highpass=opt.highpass)
print('# discriminator parameters:',
      sum(param.numel() for param in model_d.parameters()))
model_d_save = model.NLayerDiscriminator(3, n_layers=2)
DWT2 = DWTForward(J=1, mode='zero', wave='haar')  # single-level Haar DWT
if torch.cuda.is_available():
    DWT2 = DWT2.cuda()

g_loss_module = loss.GeneratorLoss(**vars(opt))

# filters are used for generating validation images
filter_low_module = model.FilterLow(kernel_size=opt.kernel_size,
                                    gaussian=opt.gaussian,
                                    include_pad=False)
filter_high_module = model.FilterHigh(kernel_size=opt.kernel_size,
                                      gaussian=opt.gaussian,
                                      include_pad=False)
if torch.cuda.is_available():
    model_g = model_g.cuda()
    model_d = model_d.cuda()
    model_d_save = model_d_save.cuda()
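
A short shape check (a sketch, not part of the original script) shows what DWT2 returns: pytorch_wavelets' DWTForward yields the low-pass band plus a list of high-pass bands, one entry per decomposition level.

# Sketch only: dummy batch to inspect the wavelet outputs.
with torch.no_grad():
    dummy = torch.randn(1, 3, 128, 128)
    if torch.cuda.is_available():
        dummy = dummy.cuda()
    low, high = DWT2(dummy)
    print(low.shape, high[0].shape)  # (1, 3, 64, 64) and (1, 3, 3, 64, 64) for J=1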