def visualize_autoencoder(dataset, get_semantic_label, ae, naive):
    num_classes = config('autoencoder.num_classes')
    cols = ['Orig', 'Naive recon', 'Autoencoder recon']
    rows = []
    fig, axes = plt.subplots(nrows=num_classes, ncols=3, figsize=(10, 16))

    for c in range(num_classes):
        X = dataset[c]
        y = get_semantic_label(c)
        rows.append(y)

        image = utils.denormalize_image(X[0].data.numpy().transpose((1, 2, 0)))
        naive_img = utils.denormalize_image(
            naive(X)[1][0].data.numpy().transpose((1, 2, 0)))
        ae_img = utils.denormalize_image(
            ae(X)[1][0].data.numpy().transpose((1, 2, 0)))

        axes[c, 0].imshow(image, interpolation='bicubic')
        axes[c, 1].imshow(naive_img, interpolation='bicubic')
        axes[c, 2].imshow(ae_img, interpolation='bicubic')

    # Add column and row headers
    for ax, row in zip(axes[:, 0], rows):
        ax.annotate(row,
                    xy=(0, 0.5),
                    xytext=(110, 0),
                    xycoords=ax.yaxis.label,
                    textcoords='offset points',
                    size='large',
                    ha='right',
                    va='center')

    for ax, col in zip(axes[0], cols):
        ax.set_title(col)

    for ax in axes.ravel():
        ax.axis('off')

    plt.savefig('ae_recon_comparison.png', dpi=200, bbox_inches='tight')
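
A note on the shared helper: every example on this page calls a denormalize_image utility that is not defined here. The sketch below is a guess at what it does for the numpy-image examples (#1 through #5), assuming the inputs were standardized per channel; the mean/std constants are placeholders rather than the project's real statistics.

import numpy as np

# Placeholder per-channel statistics (assumption; the real values live in the
# project's preprocessing code, not on this page).
IMAGE_MEAN = np.array([0.485, 0.456, 0.406])
IMAGE_STD = np.array([0.229, 0.224, 0.225])

def denormalize_image(image):
    """Undo per-channel standardization on an H x W x C float image, clipped to [0, 1]."""
    return np.clip(image * IMAGE_STD + IMAGE_MEAN, 0.0, 1.0)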
Example #2
def visualize_layer1_activations(i, axarr):
    xi, yi = tr_loader.dataset[i]
    xi = xi.view((1, 3, 288, 384))
    bp = BackPropagation(model=model)
    gcam = GradCAM(model=model)
    target_layer = "conv1"
    target_class = 1
    _ = gcam.forward(xi)
    gcam.backward(ids=torch.tensor([[target_class]]).to(device))
    regions = gcam.generate(target_layer=target_layer)
    activation = regions.detach()
    save_gradcam(
        np.squeeze(activation),
        utils.denormalize_image(np.squeeze(xi.numpy()).transpose(1, 2, 0)),
        axarr,
        i,
    )
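
Example #2 relies on a save_gradcam helper that is not shown. Below is a minimal sketch of one way it could render the result, assuming axarr is a 1-D row of matplotlib axes and the Grad-CAM map is roughly in [0, 1]; the choice of drawing into axarr[1] is an assumption, not taken from the original code.

def save_gradcam(gcam, raw_image, axarr, i):
    """Overlay the Grad-CAM map on the denormalized input (sketch; layout assumed)."""
    # i (the dataset index) is unused in this sketch.
    ax = axarr[1]  # assumed to sit next to the raw input drawn by visualize_input
    ax.imshow(raw_image)
    ax.imshow(gcam, cmap="jet", alpha=0.5)
    ax.axis("off")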
Example #3
def report_validation_performance(dataset, get_semantic_label, model, criterion):
    cols = ['Orig', 'Autoencoder recon', 'Orig', 'Autoencoder recon', 'Orig', 'Autoencoder recon']
    rows = []
    fig, axes = plt.subplots(nrows=config('autoencoder.num_classes'),
        ncols=6, figsize=(16,16))
    for i in range(config('autoencoder.num_classes')):
        X = dataset[i]
        rows.append(get_semantic_label(i))
        _, recon = model(X)
        error = criterion(recon, X).item()
        print('label {}, test error is {}'.format(i, error))
        losses = ((recon - X) ** 2).mean(3).mean(2).mean(1).data.numpy()
        best, worst = np.argmin(losses), np.argmax(losses)
        typical = np.argsort(losses)[len(losses)//2]
        print('  best case:', losses[best])
        print(' worst case:', losses[worst])
        print('    typical:', losses[typical])
        axes[i,0].imshow(utils.denormalize_image(
            np.transpose(X[best].numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,1].imshow(utils.denormalize_image(
            np.transpose(recon[best].data.numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,2].imshow(utils.denormalize_image(
            np.transpose(X[worst].numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,3].imshow(utils.denormalize_image(
            np.transpose(recon[worst].data.numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,4].imshow(utils.denormalize_image(
            np.transpose(X[typical].numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,5].imshow(utils.denormalize_image(
            np.transpose(recon[typical].data.numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))

    for ax, row in zip(axes[:,0], rows):
        ax.annotate(row, xy=(0, 0.5), xytext=(110, 0),
            xycoords=ax.yaxis.label, textcoords='offset points',
            size='large', ha='right', va='center')

    for ax, col in zip(axes[0], cols):
        ax.set_title(col)

    for ax in axes.ravel():
        ax.axis('off')

    plt.suptitle('Autoencoder reconstruction\n Best, Worst, Typical', size=20)
    plt.savefig("ae_per_class_perf.png", dpi=200, bbox_inches='tight')
Example #4
def visualize_input(i, axarr):
    xi, yi = tr_loader.dataset[i]
    axarr[0].imshow(utils.denormalize_image(xi.numpy().transpose(1, 2, 0)))
    axarr[0].axis("off")
Example #5
for ax in axes.flatten():
    ax.set_xticks([])
    ax.set_yticks([])

while True:
    rand_idx = np.random.choice(np.arange(len(metadata)),
                                size=N,
                                replace=False)
    X, y = [], []
    for idx in rand_idx:
        filename = os.path.join(config('image_path'), metadata.loc[idx,
                                                                   'filename'])
        X.append(imread(filename))
        y.append(metadata.loc[idx, 'semantic_label'])

    for i, (xi, yi) in enumerate(zip(X, y)):
        axes[0, i].imshow(xi)
        axes[0, i].set_title(yi)

    X_ = resize(np.array(X))
    X_ = standardizer.transform(X_)

    for i, (xi, yi) in enumerate(zip(X_, y)):
        axes[1, i].imshow(denormalize_image(xi), interpolation='bicubic')

    plt.draw()
    if plt.waitforbuttonpress(0):  # key press ends the loop; a mouse click samples a new batch
        break

print('OK, bye!')
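
Example #5 uses resize and standardizer objects defined elsewhere. The class below is a minimal stand-in for the standardizer, assuming it performs per-channel standardization fitted on the training images; the name and interface are assumptions.

import numpy as np

class ImageStandardizer:
    """Per-channel standardization; a sketch of the `standardizer` used above."""

    def fit(self, X):
        # X: N x H x W x C float array of training images
        self.image_mean = X.mean(axis=(0, 1, 2))
        self.image_std = X.std(axis=(0, 1, 2))
        return self

    def transform(self, X):
        return (X - self.image_mean) / self.image_std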
Example #6
def run():
    torch.multiprocessing.freeze_support()
    import torchgeometry
    mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
    # load the resnet50 model

    device = torch.device("cuda")
    net = resnet50().to(device)
    net.load_state_dict(torch.load("resnet50-90-regular.pth"))

    normalize = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])

    cifar100_test = torchvision.datasets.CIFAR100(root='./data',
                                                  train=False,
                                                  download=True,
                                                  transform=normalize)
    cifar100_test_loader = DataLoader(cifar100_test,
                                      shuffle=True,
                                      num_workers=4,
                                      batch_size=BATCH_SIZE)

    n_samples = 0
    n_well_classified = 0
    n_well_classified_top5 = 0
    net.eval()

    well_classified_images = torch.tensor([]).to(device)
    well_classified_targets = torch.tensor([]).to(device)

    with torch.no_grad():
        for i, batch in enumerate(cifar100_test_loader):
            if i > 100:
                break
            image, target = batch
            image = image.to(device)
            target = target.to(device)
            n_samples += image.shape[0]

            prob = net(image).argmax(1)

            correct = (prob == target)
            n_well_classified += correct.sum()

            well_classified_images = torch.cat(
                (well_classified_images, image[correct]), 0)
            well_classified_targets = torch.cat(
                (well_classified_targets, target[correct]), 0)

    well_DS = wellImageDataset(well_classified_images.cpu(),
                               well_classified_targets.cpu())
    well_DL = DataLoader(well_DS, shuffle=False, num_workers=1, batch_size=10)

    misclassified_images = torch.tensor([]).to(device)
    misclassified_images_perb = torch.tensor([]).to(device)
    misclassified_targets = torch.tensor([]).to(device)
    n_still_correct = 0  # attacked images the network still classifies correctly
    n_attacked = 0
    for i, (batch_image, batch_target) in enumerate(well_DL):
        batch_image = batch_image.cuda()
        batch_target = batch_target.cuda()
        per = attack.smia_attack(batch_image, 0.005, 0, net,
                                 batch_target.long(), 10)
        prob = net(per).argmax(1)
        correct = prob == batch_target
        k_still_correct = correct.sum()
        n_still_correct += k_still_correct
        n_attacked += batch_image.shape[0]

        misclassified_images = torch.cat(
            (misclassified_images, batch_image[~correct]), 0)
        misclassified_images_perb = torch.cat(
            (misclassified_images_perb, per[~correct]), 0)
        misclassified_targets = torch.cat(
            (misclassified_targets, batch_target[~correct]), 0)
        if i > 10:
            break
        print(i)
        print(k_still_correct)
    # Fraction of attacked images the network still classifies correctly (robust accuracy)
    print(n_still_correct.item() / n_attacked)

    example = denormalize_image(misclassified_images[1].unsqueeze(0))
    example_perb = denormalize_image(misclassified_images_perb[1].unsqueeze(0))
    example = example.detach().cpu().squeeze()
    example_perb = example_perb.detach().cpu().squeeze()
    plt.imshow(example)
    plt.show()
    plt.imshow(example_perb)
    plt.show()
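
Example #6 builds a wellImageDataset from the correctly classified images, but the class itself is not shown. A minimal sketch with the same constructor, under the assumption that it simply pairs images with their targets:

from torch.utils.data import Dataset

class wellImageDataset(Dataset):
    """Pairs correctly classified images with their targets (sketch)."""

    def __init__(self, images, targets):
        self.images = images
        self.targets = targets

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        return self.images[idx], self.targets[idx]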
Example #7
@tf.function
def compute_loss_and_grads(combination_image, base_image,
                           style_reference_image):
    with tf.GradientTape() as tape:
        loss = total_loss(combination_image, base_image, style_reference_image)
    grads = tape.gradient(loss, combination_image)
    return loss, grads


optimizer = keras.optimizers.SGD(
    keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=100.0,
                                                decay_steps=100,
                                                decay_rate=0.99))

base_image = preprocess_image(base_image_path, size)
style_reference_image = preprocess_image(style_image_path, size)
combination_image = tf.Variable(preprocess_image(base_image_path, size))
print(base_image.shape, style_reference_image.shape, combination_image.shape)
print("Staring Traning")
iterations = 4000
for i in tqdm(range(1, iterations + 1)):
    loss, grads = compute_loss_and_grads(combination_image, base_image,
                                         style_reference_image)
    optimizer.apply_gradients([(grads, combination_image)])
    if i % 500 == 0:
        print("Iteration %d: loss=%.2f" % (i, loss))
        img = denormalize_image(combination_image.numpy(),
                                (image_width, image_height, 3))
        fname = file_prefix + "_at_iteration_%d.png" % i
        cv2.imwrite(fname, img)
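
Example #7 passes a flattened array and a target shape to denormalize_image, which suggests a VGG-style deprocessing step rather than the per-channel version sketched earlier. Below is a sketch under that assumption (adding back the ImageNet channel means in BGR order, as produced by keras.applications.vgg19.preprocess_input); the real helper may differ.

import numpy as np

def denormalize_image(x, shape):
    """Reshape the flat array and undo VGG-style preprocessing (sketch; see assumptions above)."""
    img = x.reshape(shape).astype("float64")
    # Add back the ImageNet channel means (BGR order). Leaving the result in BGR
    # is convenient here because cv2.imwrite expects BGR images.
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    return np.clip(img, 0, 255).astype("uint8")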