Example #1
def runDiversitywithTransforms(layerName,
                               layerNeuron,
                               transforms=None,
                               imageSize=256,
                               batch=4,
                               weight=1e2):
    '''
    Run Lucent's neuron-diversity optimisation for a given layer and neuron (channel) of a PyTorch CNN.
    Image-augmentation transforms are applied during optimisation to improve the clarity and resolution
    of the resulting neuron maximisations. Assumes the model to visualise is available as the global `model_`.
    '''
    if transforms is None:
        transforms = [
            transform.pad(16),
            transform.jitter(8),
            transform.random_scale([n / 100. for n in range(80, 120)]),
            transform.random_rotate(
                list(range(-10, 10)) + list(range(-5, 5)) +
                10 * list(range(-2, 2))),
            transform.jitter(2),
        ]
    batch_param_f = lambda: param.image(imageSize, batch=batch)
    obj = objectives.channel(
        layerName, layerNeuron) - weight * objectives.diversity(layerName)
    _ = render.render_vis(model_,
                          obj,
                          batch_param_f,
                          transforms=transforms,
                          show_inline=True)
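
A minimal usage sketch for the example above (everything in it is an assumption, not part of the original snippet): it loads Lucent's InceptionV1 as the global `model_` that the function references and requests four diverse visualisations of one channel.

import torch
from lucent.optvis import render, param, transform, objectives
from lucent.modelzoo import inceptionv1

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_ = inceptionv1(pretrained=True).to(device).eval()  # global referenced inside the function

# "mixed4a" / channel 476 are placeholder choices; any valid layer/channel works.
runDiversitywithTransforms("mixed4a", 476, imageSize=224, batch=4)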
Example #2
def test_random_rotate_odd_size():
    rotate = transform.random_rotate([90])
    tensor = torch.tensor([[[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
                            [[0, 0, 1], [0, 0, 1], [0, 0, 1]],
                            [[0, 0, 1], [0, 0, 1], [0, 0, 1]]]]).to(device)
    assert torch.all(
        rotate(tensor).eq(
            torch.tensor([[[[1, 1, 1], [0, 0, 0], [0, 0, 0]],
                           [[1, 1, 1], [0, 0, 0], [0, 0, 0]],
                           [[1, 1, 1], [0, 0, 0], [0, 0, 0]]]]).to(device)))
Example #3
def test_random_rotate_even_size():
    rotate = transform.random_rotate([np.pi / 2], units="rads")
    tensor = torch.tensor([[
        [[0, 1], [0, 1]],
        [[0, 1], [0, 1]],
        [[0, 1], [0, 1]],
    ]]).to(device)
    assert torch.all(
        rotate(tensor).eq(
            torch.tensor([[
                [[1, 1], [0, 0]],
                [[1, 1], [0, 0]],
                [[1, 1], [0, 0]],
            ]]).to(device)))
Example #4
def feature_inversion(model,
                      device,
                      img,
                      layer=None,
                      n_steps=512,
                      cossim_pow=0.0):
    # Convert image to torch.tensor and scale image
    img = torch.tensor(np.transpose(img, [2, 0, 1])).to(device)
    upsample = torch.nn.Upsample(224)
    img = upsample(img)

    obj = objectives.Objective.sum([
        1.0 * dot_compare(layer, cossim_pow=cossim_pow),
        objectives.blur_input_each_step(),
    ])

    # Initialize parameterized input and stack with target image
    # to be accessed in the objective function
    params, image_f = param.image(224)

    def stacked_param_f():
        return params, lambda: torch.stack([image_f()[0], img])

    transforms = [
        transform.pad(8, mode='constant', constant_value=.5),
        transform.jitter(8),
        transform.random_scale([0.9, 0.95, 1.05, 1.1] + [1] * 4),
        transform.random_rotate(list(range(-5, 5)) + [0] * 5),
        transform.jitter(2),
    ]

    _ = render.render_vis(model,
                          obj,
                          stacked_param_f,
                          transforms=transforms,
                          thresholds=(n_steps, ),
                          show_image=False,
                          progress=False)
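
The example above relies on a `dot_compare` objective defined elsewhere in the file it was taken from. A plausible sketch, adapted from the Lucid/Lucent feature-inversion notebooks and offered here only as an assumption, compares the activations of the optimised image (batch index 1) against those of the target image (batch index 0):

@objectives.wrap_objective()
def dot_compare(layer, batch=1, cossim_pow=0):
    def inner(T):
        # Dot product between the optimised image's activations and the target's
        dot = (T(layer)[batch] * T(layer)[0]).sum()
        mag = torch.sqrt(torch.sum(T(layer)[0] ** 2))
        cossim = dot / (1e-6 + mag)
        # Negated because Lucent objectives return a loss to be minimised
        return -dot * cossim ** cossim_pow
    return inner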
def generate_images(args):
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Initialize arguments based on dataset chosen
    if args.dataset == "first5_mnist":
        args.output_size = 5
        args.input_channel = 1
    elif args.dataset == "last5_mnist":
        args.output_size = 5
        args.input_channel = 1
    elif args.dataset == "mnist":
        args.output_size = 10
        args.input_channel = 1
    elif args.dataset == "cifar10":
        args.output_size = 10
        args.input_channel = 3
    elif args.dataset == "fmnist":
        args.output_size = 10
        args.input_channel = 1
    elif args.dataset == "kmnist":
        args.output_size = 10
        args.input_channel = 1

    if args.arch == "resnet18":
        arch = ResNet18
    elif args.arch == "lenet5":
        arch = LeNet5
    elif args.arch == "lenet5_halfed":
        arch = LeNet5Halfed

    print(f"\nDataset: {args.dataset}")
    print(f"Arch: {args.arch}")
    print(f"Size: {args.size}\n")

    for i in range(len(args.seeds)):
        print(f"Iteration {i+1}, Seed {args.seeds[i]}")

        np.random.seed(args.seeds[i])
        torch.manual_seed(args.seeds[i])
        torch.cuda.manual_seed_all(args.seeds[i])
        torch.backends.cudnn.deterministic = True

        # Load model
        model = arch(input_channel=args.input_channel,
                     output_size=args.output_size).to(device)
        model.load_state_dict(
            torch.load(
                args.model_dir + f"{args.dataset}_{args.arch}_{args.seeds[i]}",
                map_location=torch.device(device),
            ))
        model.eval()

        # Generate images of each class
        for label in range(args.output_size):
            # Initialize number of images generated from each class, the last class fills the remaining
            if label == args.output_size - 1:
                nb_images = int(args.size / args.output_size +
                                args.size % args.output_size)
            else:
                nb_images = int(args.size / args.output_size)

            # Create the directory for saving if it does not exist
            create_op_dir(args.data_dir + "Synthetic " + args.dataset + "/" +
                          str(args.seeds[i]) + "/" + str(label) + "/")

            for idx in range(nb_images):
                param_f = lambda: param.image(32,
                                              decorrelate=True,
                                              fft=True,
                                              channels=args.input_channel)
                transforms = [
                    transform.pad(4),
                    transform.jitter(2),
                    transform.random_scale(
                        [1 + (n - 5) / 50.0 for n in range(11)]),
                    transform.random_rotate(list(range(-5, 6)) + 5 * [0]),
                    transform.jitter(2),
                ]

                render_vis(
                    model=model,
                    objective_f="labels:" + str(label),
                    param_f=param_f,
                    transforms=transforms,
                    preprocess=False,
                    thresholds=(512, ),
                    save_image=True,
                    image_name=os.path.dirname(os.path.abspath(__file__)) +
                    "/cache/data/" + "Synthetic " + args.dataset + "/" +
                    str(args.seeds[i]) + "/"
                    # + args.data_dir + "Synthetic " + args.dataset + "/" + str(args.seeds[i]) + "/"
                    + str(label) + "/image" + str(idx) + ".png",
                )
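
For completeness, a hypothetical command-line setup supplying the fields that `generate_images` reads; the flag names and defaults are assumptions inferred from the code, not the original script's parser.

import argparse

def parse_args():
    parser = argparse.ArgumentParser(
        description="Generate synthetic images by class-label maximisation")
    parser.add_argument("--dataset", default="mnist",
                        choices=["first5_mnist", "last5_mnist", "mnist",
                                 "cifar10", "fmnist", "kmnist"])
    parser.add_argument("--arch", default="lenet5",
                        choices=["resnet18", "lenet5", "lenet5_halfed"])
    parser.add_argument("--size", type=int, default=100,
                        help="total number of images to generate per seed")
    parser.add_argument("--seeds", type=int, nargs="+", default=[0],
                        help="one model checkpoint is loaded per seed")
    parser.add_argument("--model_dir", default="models/")
    parser.add_argument("--data_dir", default="data/")
    parser.add_argument("--no_cuda", action="store_true")
    return parser.parse_args()

if __name__ == "__main__":
    generate_images(parse_args())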