Example no. 1
def runDiversitywithTransforms(layerName,
                               layerNeuron,
                               transforms=None,
                               imageSize=256,
                               batch=4,
                               weight=1e2):
    '''
    Run Lucent neuron-diversity optimisation for a given layer and neuron (channel) of a PyTorch CNN.
    Image-augmentation transforms are applied during optimisation to improve the clarity and resolution of the produced neuron maximisations.

    '''
    if transforms is None:
        transforms = [
            transform.pad(16),
            transform.jitter(8),
            transform.random_scale([n / 100. for n in range(80, 120)]),
            transform.random_rotate(
                list(range(-10, 10)) + list(range(-5, 5)) +
                10 * list(range(-2, 2))),
            transform.jitter(2),
        ]
    batch_param_f = lambda: param.image(imageSize, batch=batch)
    obj = objectives.channel(
        layerName, layerNeuron) - weight * objectives.diversity(layerName)
    _ = render.render_vis(model_,
                          obj,
                          batch_param_f,
                          transforms=transforms,
                          show_inline=True)
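A minimal usage sketch, assuming Lucent is installed and that the global model_ referenced by the function is a pretrained InceptionV1 in eval mode; the layer and channel are borrowed from Example no. 13:

import torch
from lucent.optvis import render, param, transform, objectives
from lucent.modelzoo import inceptionv1

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ = inceptionv1(pretrained=True).to(device).eval()  # global used inside runDiversitywithTransforms
runDiversitywithTransforms('mixed4a', 476, imageSize=128, batch=4)  # renders 4 diverse visualisations of channel 476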
Example no. 2
def runDiversity(layerName, layerNeuron, imageSize=256, batch=4, weight=1e2):
    '''
    Run Lucent neuron-diversity optimisation for a given layer and neuron (channel) of a PyTorch CNN.

    '''
    batch_param_f = lambda: param.image(imageSize, batch=batch)
    obj = objectives.channel(
        layerName, layerNeuron) - weight * objectives.diversity(layerName)
    _ = render.render_vis(model_, obj, batch_param_f, show_inline=True)
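Because runDiversity passes no transforms, render_vis falls back on Lucent's standard_transforms (see Example no. 18). A one-line hedged usage, reusing the setup sketched after Example no. 1:

runDiversity('mixed4a', 476, imageSize=128, batch=4)  # hypothetical call; requires the global model_ defined above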
Example no. 3
def visualize_grad_fourier(model, obj, path, device):
    model.to(device).eval()
    param_f = lambda: param.image(128, fft=True, decorrelate=False)
    _ = render.render_vis(model,
                          obj,
                          param_f,
                          transforms=[],
                          save_image=True,
                          image_name=path,
                          show_image=False)
Example no. 4
def visualize_jitter(model, obj, path, device):
    model.to(device).eval()
    jitter_only = [transform.jitter(8)]

    param_f = lambda: param.image(512, fft=False, decorrelate=True)

    _ = render.render_vis(model,
                          obj,
                          param_f,
                          transforms=jitter_only,
                          save_image=True,
                          image_name=path,
                          show_image=False)
Example no. 5
def assert_gradient_descent(objective, model):
    params, image = param.image(224, batch=2)
    optimizer = torch.optim.Adam(params, lr=0.05)
    T = render.hook_model(model, image)
    objective_f = objectives.as_objective(objective)
    model(image())
    start_value = objective_f(T)
    for _ in range(NUM_STEPS):
        optimizer.zero_grad()
        model(image())
        loss = objective_f(T)
        loss.backward()
        optimizer.step()
    end_value = objective_f(T)
    assert start_value > end_value
Example no. 6
def test_integration(inceptionv1_model, decorrelate, fft):
    obj = "mixed3a_1x1_pre_relu_conv:0"
    param_f = lambda: param.image(224, decorrelate=decorrelate, fft=fft)
    optimizer = lambda params: torch.optim.Adam(params, lr=0.1)
    rendering = render.render_vis(
        inceptionv1_model,
        obj,
        param_f,
        optimizer=optimizer,
        thresholds=(1, 2),
        verbose=True,
        show_inline=True,
    )
    start_image, end_image = rendering
    assert (start_image != end_image).any()
Example no. 7
def visualize_filter(model, layer, filters, path, device):

    model.to(device).eval()

    if not os.path.exists(f'{path}/'):
        os.makedirs(f'{path}/')

    param_f = lambda: param.image(128, fft=False, decorrelate=False)

    for i in range(len(filters)):
        layer_name = f'features_{layer}:{filters[i]}'
        image_name = f"{path}/{layer_name}.jpg"
        _ = render.render_vis(model,
                              layer_name,
                              param_f,
                              save_image=True,
                              image_name=image_name.replace(':', '_'),
                              show_image=False)
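A hedged usage sketch for visualize_filter: the layer index and filter list are illustrative, and assume a torchvision-style model whose convolutional submodules Lucent exposes under underscore-joined names such as features_28:

# Hypothetical call: renders three filters of the layer exposed as 'features_28' (e.g. a VGG16 conv layer).
visualize_filter(model, 28, [0, 1, 2], './featviz', device)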
Example no. 8
def gen_visualization(model, image_name, objective, parametrizer, optimizer,
                      transforms, image_size, neuron, params):
    if neuron:
        full_image_path = params[
            'prepped_model_path'] + '/visualizations/images/neuron/' + image_name
    else:
        full_image_path = params[
            'prepped_model_path'] + '/visualizations/images/channel/' + image_name
    if parametrizer is None:
        parametrizer = lambda: param.image(image_size)
    print('generating featviz with objective: %s' % str(objective))
    _ = render.render_vis(model,
                          objective,
                          parametrizer,
                          optimizer,
                          transforms=transforms,
                          save_image=True,
                          image_name=full_image_path,
                          show_inline=True)
Example no. 9
def visualize_multiple_neurons(model, neuron_names, save_path, device):

    model.to(device).eval()

    param_f = lambda: param.image(512, batch=1)

    neuron1 = neuron_names[0]
    print(neuron1)

    obj = objectives.channel(neuron_names[0][0], neuron_names[0][1])
    for i in range(1, len(neuron_names)):
        obj += objectives.channel(neuron_names[i][0], neuron_names[i][1])

    _ = render.render_vis(model,
                          obj,
                          param_f,
                          save_image=True,
                          image_name=f'{save_path}_placeholder.jpg',
                          show_image=False)
Example no. 10
def visualize_filter_fc2(model, layer, filters, path, class_labels, device):

    model.to(device).eval()

    if not os.path.exists(f'{path}/'):
        os.makedirs(f'{path}/')

    param_f = lambda: param.image(64, fft=False, decorrelate=False)

    for i in range(len(filters)):
        layer_name = f'classifier_{layer}:{filters[i]}'
        image_name = f"{path}/{layer_name}_{class_labels[filters[i]]}.jpg"
        _ = render.render_vis(model,
                              layer_name,
                              param_f,
                              fixed_image_size=64,
                              save_image=True,
                              image_name=image_name.replace(':', '_'),
                              show_image=False)
Example no. 11
def visualize_diversity_fc(model, layer, filter, path, batch_size, device):

    model.to(device).eval()

    if not os.path.exists(f'{path}/'):
        os.makedirs(f'{path}/')

    batch_param_f = lambda: param.image(128, batch=batch_size)
    layer_name = f'fc_{layer}:{filter}'

    obj = objectives.channel(
        f"fc_{layer}", filter) - 1e2 * objectives.diversity(f"fc_{layer}")

    image_name = f"{path}/{layer_name}_diversity.jpg"

    _ = render.render_vis(model,
                          obj,
                          batch_param_f,
                          save_image=True,
                          image_name=image_name.replace(':', '_'),
                          show_image=False)
Example no. 12
def feature_inversion(model,
                      device,
                      img,
                      layer=None,
                      n_steps=512,
                      cossim_pow=0.0):
    # Convert image to torch.tensor and scale image
    img = torch.tensor(np.transpose(img, [2, 0, 1])).to(device)
    upsample = torch.nn.Upsample(224)
    img = upsample(img)

    obj = objectives.Objective.sum([
        1.0 * dot_compare(layer, cossim_pow=cossim_pow),
        objectives.blur_input_each_step(),
    ])

    # Initialize parameterized input and stack with target image
    # to be accessed in the objective function
    params, image_f = param.image(224)

    def stacked_param_f():
        return params, lambda: torch.stack([image_f()[0], img])

    transforms = [
        transform.pad(8, mode='constant', constant_value=.5),
        transform.jitter(8),
        transform.random_scale([0.9, 0.95, 1.05, 1.1] + [1] * 4),
        transform.random_rotate(list(range(-5, 5)) + [0] * 5),
        transform.jitter(2),
    ]

    _ = render.render_vis(model,
                          obj,
                          stacked_param_f,
                          transforms=transforms,
                          thresholds=(n_steps, ),
                          show_image=False,
                          progress=False)
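The helper dot_compare is referenced above but not defined in the snippet. A plausible sketch, offered as an assumption rather than the author's code: it rewards alignment between the activations of the optimised image (batch index 0) and the stacked target image (batch index 1) at the chosen layer, with an optional cosine-similarity exponent:

# Hypothetical sketch of dot_compare; the signature matches the call above, the body is assumed.
@objectives.wrap_objective()
def dot_compare(layer, batch=1, cossim_pow=0.0):
    def inner(T):
        dot = (T(layer)[batch] * T(layer)[0]).sum()         # alignment between target and optimised activations
        mag = torch.sqrt(torch.sum(T(layer)[batch] ** 2))   # magnitude of the target activations
        cossim = dot / (1e-6 + mag)
        return -dot * cossim ** cossim_pow                  # negated because render_vis minimises the loss
    return inner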
Example no. 13
def main():

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = inceptionv1(pretrained=True)
    model.to(device).eval()

    CPPN = False

    SPATIAL_DECORRELATION = True
    CHANNEL_DECORRELATION = True

    if CPPN:
        # CPPN parameterization
        param_f = lambda: param.cppn(224)
        opt = lambda params: torch.optim.Adam(params, 5e-3)
        # Some objectives work better with CPPN than others
        obj = "mixed4d_3x3_bottleneck_pre_relu_conv:139"
    else:
        param_f = lambda: param.image(
            224, fft=SPATIAL_DECORRELATION, decorrelate=CHANNEL_DECORRELATION)
        opt = lambda params: torch.optim.Adam(params, 5e-2)
        obj = "mixed4a:476"

    render.render_vis(model, obj, param_f, opt)
Example no. 14
def render_activation_grid_very_naive(
        img,
        model,
        layer="main_net_0_ops_(1, 2)_ops_(0, 1)_op",
        cell_image_size=48,
        n_steps=1024):
    # First we need to normalize and resize the image
    img = torch.tensor(np.transpose(img, [2, 0, 1])).to(device)
    normalize = (transform.preprocess_inceptionv1() if model._get_name()
                 == "InceptionV1" else transform.normalize())
    transforms = [
        normalize,
        torch.nn.Upsample(size=224, mode="bilinear", align_corners=True),
    ]
    transforms_f = transform.compose(transforms)
    # shape: (1, 3, original height of img, original width of img)
    img = img.unsqueeze(0)
    # shape: (1, 3, 224, 224)
    img = transforms_f(img)

    # Here we compute the activations of the layer `layer` using `img` as input
    # shape: (layer_channels, layer_height, layer_width), the shape depends on the layer
    acts = get_layer(model, layer, img)[0]
    layer_channels, layer_height, layer_width = acts.shape
    # for each position `(y, x)` in the feature map `acts`, we optimize an image
    # to match with the features `acts[:, y, x]`
    # This means that the total number of cells (which is the batch size here)
    # in the grid is layer_height*layer_width.
    nb_cells = layer_height * layer_width

    # Parametrization of each cell in the grid
    param_f = lambda: param.image(cell_image_size, batch=nb_cells)
    params, image_f = param_f()

    obj = objectives.Objective.sum([
        # for each position in `acts`, maximize the dot product between the activations
        # `acts` at the position (y, x) and the features of the corresponding
        # cell image on our 'grid'. The activations at (y, x) is a vector of size
        # `layer_channels` (this depends on the `layer`). The features
        # of the corresponding cell on our grid is a tensor of shape
        # (layer_channels, cell_layer_height, cell_layer_width).
        # Note that cell_layer_width != layer_width and cell_layer_height != layer_height
        # because the cell image size is smaller than the image size.
        # With `dot_compare`, we maximize the dot product between
        # cell_activations[y_cell, x_xcell] and acts[y,x] (both of size `layer_channels`)
        # for each possible y_cell and x_cell, then take the average to get a single
        # number. Check `dot_compare` for more details.
        dot_compare(layer,
                    acts[:, y:y + 1, x:x + 1],
                    batch=x + y * layer_width)
        for i, (
            x,
            y) in enumerate(product(range(layer_width), range(layer_height)))
    ])
    results = render.render_vis(
        model,
        obj,
        param_f,
        thresholds=(n_steps, ),
        progress=True,
        fixed_image_size=cell_image_size,
        show_image=False,
    )
    # shape: (layer_height*layer_width, cell_image_size, cell_image_size, 3)
    imgs = results[-1]  # last step results
    # shape: (layer_height*layer_width, 3, cell_image_size, cell_image_size)
    imgs = imgs.transpose((0, 3, 1, 2))
    imgs = torch.from_numpy(imgs)
    imgs = imgs[:, :, 2:-2, 2:-2]
    # turn imgs into a grid
    grid = torchvision.utils.make_grid(imgs,
                                       nrow=int(np.sqrt(nb_cells)),
                                       padding=0)
    grid = grid.permute(1, 2, 0)
    grid = grid.numpy()
    render.show(grid)
    return imgs
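Here too dot_compare is assumed to exist; a hedged sketch consistent with this call site (a layer name, a target activation slice, and a batch index), implementing the averaged dot product described in the comment block above:

# Hypothetical sketch, not the original helper: compares one cell's activations against acts[:, y, x].
@objectives.wrap_objective()
def dot_compare(layer, acts, batch=1):
    def inner(T):
        pred = T(layer)[batch]                                  # (layer_channels, cell_layer_height, cell_layer_width)
        return -(pred * acts).sum(dim=0, keepdim=True).mean()   # average dot product over cell positions, negated
    return inner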
Example no. 15
def fetch_deepviz_img_for_subgraph(model, layer_name, within_id, targetid,
                                   viz_folder, params):
    model = set_across_model(model, 'target_node', None)
    objective_str = layer_name + ':' + str(targetid)
    neuron = params['deepviz_neuron']
    #generate objective
    #objective = gen_objective(targetid,model,params,neuron=neuron)
    print('generating feature_viz objective string for %s' % targetid)

    if not params['deepviz_neuron']:
        #return layer_name+':'+str(within_id)
        objective = objectives.channel(layer_name, int(within_id))
    else:
        objective = objectives.neuron(layer_name, int(within_id))
    file_path = viz_folder + '/images.csv'
    parametrizer = params['deepviz_param']
    optimizer = params['deepviz_optim']
    transforms = params['deepviz_transforms']
    image_size = params['deepviz_image_size']

    param_str = object_2_str(parametrizer, "params['deepviz_param']=")
    optimizer_str = object_2_str(optimizer, "params['deepviz_optim']=")
    transforms_str = object_2_str(transforms, "params['deepviz_transforms']=")
    df = pd.read_csv(file_path, dtype=str)
    df_sel = df.loc[(df['targetid'] == str(targetid))
                    & (df['objective'] == objective_str) &
                    (df['parametrizer'] == param_str) &
                    (df['optimizer'] == optimizer_str) &
                    (df['transforms'] == transforms_str) &
                    (df['neuron'] == str(neuron))]
    if len(df_sel) == 0:
        print('deepviz image not found for %s, generating . . .' % targetid)
        #image_name = 'deepviz_'+str(targetid)+'_'+objective+'_'+str(time.time())+'.jpg'
        image_name = str(targetid) + '_' + objective_str + '_' + str(
            time.time()) + '.jpg'
        #gen_visualization(model,image_name,objective,parametrizer,optimizer,transforms,image_size,neuron,params)
        if neuron:
            full_image_path = viz_folder + '/neuron/' + image_name
        else:
            full_image_path = viz_folder + '/channel/' + image_name
        if parametrizer is None:
            parametrizer = lambda: param.image(image_size)
        print('generating featviz with objective: %s' % str(objective_str))
        _ = render.render_vis(model,
                              objective,
                              parametrizer,
                              optimizer,
                              transforms=transforms,
                              save_image=True,
                              image_name=full_image_path,
                              show_inline=True)
        with open(file_path, 'a') as csv:
            csv.write(','.join([
                image_name,
                str(targetid), objective_str, param_str, optimizer_str,
                transforms_str,
                str(neuron)
            ]) + '\n')
    else:
        print('found pre-generated image')
        image_name = df_sel.iloc[0]['image_name']

    if neuron:
        return 'neuron/' + image_name
    else:
        return 'channel/' + image_name
Example no. 16
def generate_images(args):
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Initialize arguments based on dataset chosen
    if args.dataset == "first5_mnist":
        args.output_size = 5
        args.input_channel = 1
    elif args.dataset == "last5_mnist":
        args.output_size = 5
        args.input_channel = 1
    elif args.dataset == "mnist":
        args.output_size = 10
        args.input_channel = 1
    elif args.dataset == "cifar10":
        args.output_size = 10
        args.input_channel = 3
    elif args.dataset == "fmnist":
        args.output_size = 10
        args.input_channel = 1
    elif args.dataset == "kmnist":
        args.output_size = 10
        args.input_channel = 1

    if args.arch == "resnet18":
        arch = ResNet18
    elif args.arch == "lenet5":
        arch = LeNet5
    elif args.arch == "lenet5_halfed":
        arch = LeNet5Halfed

    print(f"\nDataset: {args.dataset}")
    print(f"Arch: {args.arch}")
    print(f"Size: {args.size}\n")

    for i in range(len(args.seeds)):
        print(f"Iteration {i+1}, Seed {args.seeds[i]}")

        np.random.seed(args.seeds[i])
        torch.manual_seed(args.seeds[i])
        torch.cuda.manual_seed_all(args.seeds[i])
        torch.backends.cudnn.deterministic = True

        # Load model
        model = arch(input_channel=args.input_channel,
                     output_size=args.output_size).to(device)
        model.load_state_dict(
            torch.load(
                args.model_dir + f"{args.dataset}_{args.arch}_{args.seeds[i]}",
                map_location=torch.device(device),
            ))
        model.eval()

        # Generate images of each class
        for label in range(args.output_size):
            # Number of images to generate for this class; the last class absorbs the remainder
            if label == args.output_size - 1:
                nb_images = int(args.size / args.output_size +
                                args.size % args.output_size)
            else:
                nb_images = int(args.size / args.output_size)

            # Create the directory for saving if it does not exist
            create_op_dir(args.data_dir + "Synthetic " + args.dataset + "/" +
                          str(args.seeds[i]) + "/" + str(label) + "/")

            for idx in range(nb_images):
                param_f = lambda: param.image(32,
                                              decorrelate=True,
                                              fft=True,
                                              channels=args.input_channel)
                transforms = [
                    transform.pad(4),
                    transform.jitter(2),
                    transform.random_scale(
                        [1 + (i - 5) / 50.0 for i in range(11)]),
                    transform.random_rotate(list(range(-5, 6)) + 5 * [0]),
                    transform.jitter(2),
                ]

                render_vis(
                    model=model,
                    objective_f="labels:" + str(label),
                    param_f=param_f,
                    transforms=transforms,
                    preprocess=False,
                    thresholds=(512, ),
                    save_image=True,
                    image_name=os.path.dirname(os.path.abspath(__file__)) +
                    "/cache/data/" + "Synthetic " + args.dataset + "/" +
                    str(args.seeds[i]) + "/"
                    # + args.data_dir + "Synthetic " + args.dataset + "/" + str(args.seeds[i]) + "/"
                    + str(label) + "/image" + str(idx) + ".png",
                )
Example no. 17
def render_vis(
    model,
    objective_f,
    param_f=None,
    optimizer=None,
    transforms=None,
    thresholds=(512, ),
    verbose=False,
    preprocess=True,
    progress=True,
    show_image=True,
    save_image=False,
    image_name=None,
    show_inline=False,
    fixed_image_size=None,
):
    if param_f is None:
        param_f = lambda: param.image(128)
    # param_f is a function that should return two things
    # params - parameters to update, which we pass to the optimizer
    # image_f - a function that returns an image as a tensor
    params, image_f = param_f()

    if optimizer is None:
        optimizer = lambda params: torch.optim.Adam(params, lr=5e-2)
    optimizer = optimizer(params)

    if transforms is None:
        transforms = transform.standard_transforms
    transforms = transforms.copy()

    if preprocess:
        if model._get_name() == "InceptionV1":
            # Original Tensorflow InceptionV1 takes input range [-117, 138]
            transforms.append(transform.preprocess_inceptionv1())
        else:
            # Assume we use normalization for torchvision.models
            # See https://pytorch.org/docs/stable/torchvision/models.html
            transforms.append(transform.normalize())

    # Upsample images smaller than 224
    image_shape = image_f().shape
    if fixed_image_size is not None:
        new_size = fixed_image_size
    elif image_shape[2] < 224 or image_shape[3] < 224:
        new_size = 224
    else:
        new_size = None
    if new_size:
        transforms.append(
            torch.nn.Upsample(size=new_size,
                              mode="bilinear",
                              align_corners=True))

    transform_f = transform.compose(transforms)

    hook = hook_model(model, image_f)
    objective_f = objectives.as_objective(objective_f)

    if verbose:
        model(transform_f(image_f()))
        print("Initial loss: {:.3f}".format(objective_f(hook)))

    images = []
    try:
        for i in tqdm(range(1, max(thresholds) + 1), disable=(not progress)):
            optimizer.zero_grad()
            try:
                model.encode_image(transform_f(image_f()))
            except RuntimeError as ex:
                if i == 1:
                    # Only display the warning message
                    # on the first iteration, no need to do that
                    # every iteration
                    warnings.warn(
                        "Some layers could not be computed because the size of the "
                        "image is not big enough. It is fine, as long as the non"
                        "computed layers are not used in the objective function"
                        f"(exception details: '{ex}')")
            loss = objective_f(hook)
            loss.backward()
            optimizer.step()
            if i in thresholds:
                image = tensor_to_img_array(image_f())
                if verbose:
                    print("Loss at step {}: {:.3f}".format(
                        i, objective_f(hook)))
                    if show_inline:
                        show(image)
                images.append(image)
    except KeyboardInterrupt:
        print("Interrupted optimization at step {:d}.".format(i))
        if verbose:
            print("Loss at step {}: {:.3f}".format(i, objective_f(hook)))
        images.append(tensor_to_img_array(image_f()))

    if save_image:
        export(image_f(), image_name)
    if show_inline:
        show(tensor_to_img_array(image_f()))
    elif show_image:
        view(image_f())
    return images
Example no. 18
def render_vis(model,
               objective_f,
               param_f=None,
               optimizer=None,
               transforms=None,
               thresholds=(512, ),
               verbose=False,
               preprocess=True,
               progress=True,
               show_image=True,
               save_image=False,
               image_name=None,
               show_inline=False):
    if param_f is None:
        param_f = lambda: param.image(128)
    # param_f is a function that should return two things
    # params - parameters to update, which we pass to the optimizer
    # image_f - a function that returns an image as a tensor
    params, image_f = param_f()

    if optimizer is None:
        optimizer = lambda params: torch.optim.Adam(params, lr=5e-2)
    optimizer = optimizer(params)

    if transforms is None:
        transforms = transform.standard_transforms.copy()

    if preprocess:
        if model._get_name() == "InceptionV1":
            # Original Tensorflow InceptionV1 takes input range [-117, 138]
            transforms.append(transform.preprocess_inceptionv1())
        else:
            # Assume we use normalization for torchvision.models
            # See https://pytorch.org/docs/stable/torchvision/models.html
            transforms.append(transform.normalize())

    # Upsample images smaller than 224
    image_shape = image_f().shape
    if image_shape[2] < 224 or image_shape[3] < 224:
        transforms.append(
            torch.nn.Upsample(size=224, mode='bilinear', align_corners=True))

    transform_f = transform.compose(transforms)

    hook = hook_model(model, image_f)
    objective_f = objectives.as_objective(objective_f)

    if verbose:
        model(transform_f(image_f()))
        print("Initial loss: {:.3f}".format(objective_f(hook)))

    images = []

    try:
        for i in tqdm(range(1, max(thresholds) + 1), disable=(not progress)):
            optimizer.zero_grad()
            model(transform_f(image_f()))
            loss = objective_f(hook)
            loss.backward()
            optimizer.step()
            if i in thresholds:
                image = tensor_to_img_array(image_f())
                if verbose:
                    print("Loss at step {}: {:.3f}".format(
                        i, objective_f(hook)))
                    if show_inline:
                        show(image)
                images.append(image)
    except KeyboardInterrupt:
        print("Interrupted optimization at step {:d}.".format(i))
        if verbose:
            print("Loss at step {}: {:.3f}".format(i, objective_f(hook)))
        images.append(tensor_to_img_array(image_f()))

    if save_image:
        export(image_f(), image_name)
    if show_inline:
        show(tensor_to_img_array(image_f()))
    elif show_image:
        view(image_f())
    return images
Example no. 19
def param_f():
    return param.image(224, fft=True, decorrelate=True)
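render_vis calls such a param_f to obtain (params, image_f), so it can be passed in directly; a hedged one-line usage with the objective string from Example no. 13:

render.render_vis(model, "mixed4a:476", param_f)  # hypothetical call; assumes model is a Lucent-compatible InceptionV1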
Example no. 20
def test_hook_model(inceptionv1_model):
    _, image_f = param.image(224)
    hook = render.hook_model(inceptionv1_model, image_f)
    inceptionv1_model(image_f())
    assert hook("input").shape == (1, 3, 224, 224)
    assert hook("labels").shape == (1, 1008)