Example #1
def main():
    model = load_model('model2.h5py')
    model.summary()
    # swap softmax activation function to linear
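    # (maximizing the raw softmax output mostly suppresses the other classes and its
    #  gradients saturate, so keras-vis recommends optimizing the pre-softmax, linear output)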
    layer_idx = -1
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)


    layer_names = ['leaky_re_lu_1', 'leaky_re_lu_2', 'leaky_re_lu_3', 'leaky_re_lu_4']
    for lid, layer_name in enumerate(layer_names):
        layer_idx = utils.find_layer_idx(model, layer_name)
        filters = np.arange(get_num_filters(model.layers[layer_idx]))
        filters = _shuffle(filters)
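        # Only the first 16 filters of the shuffled order are visualized below,
        # i.e. 16 filters picked at random from this layer.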
        vis_images = []
        for idx in range(16):
            indices = filters[idx]
            img = visualize_activation(model, layer_idx, filter_indices=indices, tv_weight=0.,
                               input_modifiers=[Jitter(0.5)])
            vis_images.append(img)
        #img = img.reshape((48, 48))
        #plt.imshow(img, cmap="Blues")
        #plt.show()

        stitched = utils.stitch_images(vis_images, cols=8)
        plt.figure()
        plt.axis('off')
        shape = stitched.shape
        stitched = stitched.reshape((shape[0], shape[1]))
        plt.imshow(stitched)
        plt.title(layer_name)
        plt.tight_layout()
        plt.savefig('Filter_{}.png'.format(lid))
Example #2
def vis_3():
    # Visualize output unit 36 of the chosen layer.
    # Jitter by up to 16 pixels along all dimensions during the optimization process.
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=36,
                               max_iter=2000,
                               input_modifiers=[Jitter(16)])
    save_image(img, 'filter_single_6_' + str(37) + '.png')
Example #3
def get_img(idx, name):
    #img = visualize_activation_tf(vis_img_input,end_points,name, filter_indices=idx, max_iter=600, act_max_weight=1.,tv_weight=0.01,lp_norm_weight=10, input_modifiers=[Jitter(16)], verbose=True)
    # Two-pass optimization: the first call produces a rough seed image, the second refines it.
    seed = visualize_activation_tf(vis_img_input,
                                   end_points,
                                   name,
                                   filter_indices=idx,
                                   act_max_weight=8.,
                                   tv_weight=0.,
                                   input_modifiers=[Jitter(0.05)],
                                   verbose=True)
    img = visualize_activation_tf(vis_img_input,
                                  end_points,
                                  name,
                                  filter_indices=idx,
                                  seed_input=seed,
                                  input_modifiers=[Jitter(0.05)],
                                  verbose=True)
    return img
def show_activation(model, layer_idx):
    from vis.visualization import visualize_activation
    from vis.input_modifiers import Jitter
    # Class index 1 is 'PNEUMONIA' for this model; with filter_indices=None
    # all output units are maximized jointly.
    im = visualize_activation(model,
                              layer_idx,
                              filter_indices=None,
                              max_iter=500,
                              input_modifiers=[Jitter(16)],
                              verbose=False)
    plt.imshow(im)
    plt.show()
def get_img(idx, name):
    img = visualize_activation_tf(inputs,
                                  end_points,
                                  name,
                                  filter_indices=idx,
                                  max_iter=600,
                                  act_max_weight=1.,
                                  tv_weight=0.01,
                                  lp_norm_weight=10,
                                  input_modifiers=[Jitter(16)],
                                  verbose=True)
    return img
Example #6
def vis_4():
    import numpy as np
    # categories = np.random.permutation(1000)[:15]

    vis_images = []
    image_modifiers = [Jitter(16)]
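    # Jitter(16) randomly shifts the input by up to 16 px each step, which tends to give crisper results.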
    for idx in range(10):
        print('filter_indices_: ' + str(idx + 1))
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=idx,
                                   max_iter=4000,
                                   input_modifiers=image_modifiers)
        save_image(img, '[4000]filter_' + str(idx + 1) + '.png')
Example #7
def generate_max_activation(model, gradmod, backprop):
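    # backprop_modifier (e.g. 'guided' or 'relu') alters how gradients are backpropagated,
    # while grad_modifier post-processes the gradients before each optimization step.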
    activations = visualize_activation(model,
                                       -1,
                                       filter_indices=[0],
                                       verbose=True,
                                       input_modifiers=[Jitter(16)],
                                       backprop_modifier=backprop,
                                       grad_modifier=gradmod,
                                       act_max_weight=1,
                                       lp_norm_weight=10,
                                       tv_weight=10)
    plt.imsave('activations_inferno.eps', activations[:, :, 0], cmap='inferno')
    plt.imsave('activations_plasma.eps', activations[:, :, 0], cmap='plasma')
    plt.imsave('activations_magma.eps', activations[:, :, 0], cmap='magma')
    plt.imsave('activations_gray.eps', activations[:, :, 0], cmap='gray')
    plt.imsave('activations_viridis.eps', activations[:, :, 0], cmap='viridis')
Example #8
def layered_actmax(sample_count):
    for layer_nm in layerlist_for_layered_actmax:
        layer_idx = utils.find_layer_idx(model, layer_nm)
        num_filters = get_num_filters(model.layers[layer_idx])
        drawn_filters = random.choices(np.arange(num_filters), k=sample_count)
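        # random.choices samples with replacement, so the same filter index may be drawn more than once.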
        for filter_id in drawn_filters:
            img = visualize_activation(model,
                                       layer_idx,
                                       filter_indices=filter_id,
                                       input_modifiers=[Jitter(16)])
            img = img.reshape(IMG_SIZE, IMG_SIZE, IMG_DEPTH)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            plt.imshow(img, cmap='gray')
            img_path = os.path.join(IMG_DIR,
                                    layer_nm + '_' + str(filter_id) + '.jpg')
            plt.imsave(img_path, img)
            print(f'Saved layer {layer_nm}/{filter_id} to file!')
    print('done!')
Example #9
def visualiseDenseLayer(model,
                        layer_name,
                        output_classes,
                        verbose,
                        save=False):
    '''
    Makes a plot for visualising the activations of a dense layer.
    :param model: The model to visualise.
    :param layer_name: The name of the dense layer to visualise.
    :param output_classes: The number of activations in the layer.
    :param verbose: Print statements of progress.
    :param save: Optional file name; when given, the stitched image is written
                 under ../SavedData/ instead of being displayed.
    :return: N/A
    '''

    layer_index = utils.find_layer_idx(model, layer_name)
    model.layers[layer_index].activation = activations.linear
    model = utils.apply_modifications(model)

    vis_images = []
    for filter_index in range(0, output_classes):
        if (verbose):
            print("Preparing Visualisation for class {} in layer {}".format(
                filter_index, layer_name))
        visualisation = visualize_activation(model,
                                             layer_index,
                                             filter_index,
                                             max_iter=500,
                                             input_modifiers=[Jitter(16)])

        img = utils.draw_text(visualisation, 'Class {}'.format(filter_index))
        vis_images.append(img)

    stitched = utils.stitch_images(vis_images, cols=4)

    if save:
        matplotlib.image.imsave("../SavedData/" + save, stitched)
    else:
        plt.figure()
        plt.axis('off')
        plt.imshow(stitched)
        plt.show()
Example #10
    def apply(self, model, layer, filter, nb_iter):
        # Swap the softmax activation for a linear one
        model.layers[layer].activation = activations.linear
        model = utils.apply_modifications(model)

        img = visualize_activation(model,
                                   layer,
                                   filter_indices=filter,
                                   max_iter=nb_iter,
                                   tv_weight=1.,
                                   lp_norm_weight=0.,
                                   verbose=True,
                                   input_modifiers=[Jitter(16)])
        if img.shape[2] == 1:
            # Replicate the single grayscale channel across three RGB channels.
            img = np.repeat(img, 3, axis=2).astype(float)
        return img
def main():
    for domain in domains:
        for model_name in model_names:
            for init_opt in init_opts:
                for preprocess_opt in preprocess_opts:
                    for stage in stages:
                        log_path = os.path.join(log_path_main, domain,
                                                model_name, init_opt, stage)
                        if not os.path.exists(log_path):
                            os.makedirs(log_path)
                        fold = '0'
                        K.clear_session()

                        if init_opt == 'random':
                            model = model_loader.load_full_model(
                                model_name, random_weights=True, no_cats=2)
                        elif init_opt == 'ImageNet':
                            model = model_loader.load_full_model(
                                model_name, random_weights=False, no_cats=2)
                        elif init_opt == 'fine-tuned':  # fine-tuned from ImageNet
                            model_path = os.path.join('log', domain,
                                                      model_name, 'ImageNet',
                                                      preprocess_opt, fold,
                                                      'stage_5.h5')
                            model = model_loader.load_full_model(model_name,
                                                                 no_cats=2)
                            model.load_weights(model_path)
                        # keras.utils.plot_model(model, to_file=os.path.join(log_path_main, model_name + '.png'))

                        if model_name == 'ResNet50':
                            end_layer = ResNet50_layer_names[stage]
                        elif model_name == 'VGG19':
                            end_layer = VGG19_layer_names[stage]

                        ind_layer = utils.find_layer_idx(model, end_layer)
                        model.layers[-1].activation = keras.activations.linear
                        model = utils.apply_modifications(model)

                        # Per-stage optimization settings: (tv_weight, max_iter).
                        stage_settings = {
                            'stage_1': (0, 200),
                            'stage_2': (1, 200),
                            'stage_3': (2, 200),
                            'stage_4': (1, 200),
                            'stage_5': (0, 200),
                            'stage_final': (2, 400),
                        }
                        tv_weight, max_iter = stage_settings[stage]

                        #for ind_filter in range(model.layers[ind_layer].output_shape[-1]):
                        for ind_filter in range(
                                min(200,
                                    model.layers[ind_layer].output_shape[-1])):
                            img = visualize_activation(
                                model,
                                ind_layer,
                                filter_indices=ind_filter,
                                input_modifiers=[Jitter()],
                                tv_weight=tv_weight,
                                max_iter=max_iter)

                            img = Image.fromarray(img, 'RGB')
                            img.save(
                                os.path.join(log_path,
                                             str(ind_filter) + '.png'), 'PNG')
Example #12
                                          steps=10)
print('Test loss: {:.4f}. Test Accuracy: {:.4f}'.format(
    test_loss, test_accuracy))

if args.layer_to_visualize:
    print('Visualizing model activation')
    filters_to_visualize = [0, 1, 2, 3, 4, 5, 6, 7]

    for filter_to_visualize in filters_to_visualize:
        print('Visualizing layer {} filter {}'.format(
            model.layers[args.layer_to_visualize].name, filter_to_visualize))
        visualization = visualize_activation(
            model,
            args.layer_to_visualize,
            filter_indices=filter_to_visualize,
            input_modifiers=[Jitter(0)])
        plt.imshow(visualization)
        plt.title(f'Filter = {filter_to_visualize}')
        plt.axis('off')
        plt.show()


def display(display_list):
    plt.figure(figsize=(15, 15))

    title = ['Input Image', 'True Mask', 'Predicted Mask']

    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
Example #13
# Imports assumed for a standalone script (keras-vis targets standalone Keras / TF 1.x):
import os
import sys

import numpy as np
from keras.models import load_model
from tensorflow import set_random_seed

from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm
from vis.input_modifiers import Jitter
from vis.optimizer import Optimizer
from vis.callbacks import GifGenerator

if __name__ == "__main__":
    lucky_num = 50756711264384381850616619995309447969109689825336919605444730053665222018857 % (2 ** 32)
    np.random.seed(lucky_num)
    set_random_seed(lucky_num)

    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    modelH5 = sys.argv[1]
    #  outputDir = sys.argv[2]

    model = load_model(modelH5)
    layerDict = dict([(layer.name, layer) for layer in model.layers[1:]])
    layerName = "dense_5"
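    # Weighted loss: maximize the target activation (weight 2) while the LP-norm and
    # total-variation terms (weight 10 each) act as image regularizers.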

    for idx in range(7):
        outputClass = [0]
        losses = [(ActivationMaximization(layerDict[layerName], outputClass), 2),
                  (LPNorm(model.input), 10),
                  (TotalVariation(model.input), 10)]
        opt = Optimizer(model.input, losses)
        opt.minimize(max_iter=500,
                     verbose=True,
                     input_modifiers=[Jitter()],
                     callbacks=[GifGenerator('OptProgress_%d' % (idx))])


Example #14
plt.rcParams['figure.figsize'] = (18, 6)

# Visualize output unit 3 of the chosen layer.
img = visualize_activation(model, layer_idx, filter_indices=3)
plt.imshow(img)

idx = 59
# Generate input image for each filter.
new_vis_images = []
for i, idx in enumerate(filters):
    # We will seed with optimized image this time.
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=idx,
                               seed_input=vis_images[i],
                               input_modifiers=[Jitter(0.05)])

    # Utility to overlay text on image.
    img = utils.draw_text(img, 'Filter {}'.format(idx))
    new_vis_images.append(img)

# Generate stitched image palette with 5 cols so we get 2 rows.
stitched = utils.stitch_images(new_vis_images, cols=5)
plt.figure()
plt.axis('off')
plt.imshow(stitched)
plt.show()

from vis.visualization import get_num_filters

selected_indices = []
Example #15

layer_idx_conv = utils.find_layer_idx(model, 'predictions')
model.layers[layer_idx_conv].activation = activations.linear
model = utils.apply_modifications(model)
filter_indices = 392
# Maximally activating input for a convolution filter ===========================================================================
# The optimized input that maximally activates a filter represents the (human-interpretable) feature that this filter attends to and extracts.
# data_input_c[:,:,:,0]=max_activition_norm
# max_activition_norm = visualize_activation(model, layer_idx_conv, filter_indices=filter_indices, max_iter=1000, input_modifiers=[Jitter(9)],  verbose=True,seed_input=data_input_c)
max_activation_norm = visualize_activation(model,
                                           layer_idx_conv,
                                           filter_indices=filter_indices,
                                           max_iter=5000,
                                           input_modifiers=[Jitter(16)],
                                           verbose=True)
# max_activation_norm = np.squeeze(max_activation_norm, axis=-1)
for ii in range(1):
    plt.figure()
    # max_activation_piece = max_activation_norm[:, :, ii]
    plt.imshow(max_activation_norm)
    # plt.imshow((data_input_c[:,:,ii,0]), cmap=plt.cm.jet)
    plt.title(str(filter_indices))
    plt.show()
Example #16
# layer_idx = utils.find_layer_idx(model, 'batch_normalization_xx')
# print(model.layers[layer_idx].get_weights())

for layer_name in layer_names:
    layer_idx = utils.find_layer_idx(model, layer_name)

    # Visualize all filters in this layer.
    filters = np.arange(get_num_filters(model.layers[layer_idx]))

    vis_images = []
    for idx in filters:
        #if idx % 2 == 0:
        # Generate input image for each filter.
        img = visualize_activation(
            model,
            layer_idx,
            filter_indices=idx,
            act_max_weight=10,
            lp_norm_weight=0.01,
            tv_weight=0.05,  # alternative: lp_norm_weight=0, tv_weight=0
            max_iter=200,
            input_modifiers=[Jitter()])  # verbose=True

        vis_images.append(img)
        print(idx)

    stitched = utils.stitch_images(vis_images, cols=24)
    scipy.misc.imsave(layer_name + '.png', stitched)
from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm
from vis.input_modifiers import Jitter
from vis.optimizer import Optimizer

from vis.callbacks import GifGenerator
from vis.utils.vggnet import VGG16

# Build the VGG16 network with ImageNet weights
model = VGG16(weights='imagenet', include_top=True)
print('Model loaded.')

# The name of the layer we want to visualize
# (see model definition in vggnet.py)
layer_name = 'predictions'
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
output_class = [20]

losses = [(ActivationMaximization(layer_dict[layer_name], output_class), 2),
          (LPNorm(model.input), 10), (TotalVariation(model.input), 10)]
opt = Optimizer(model.input, losses)
opt.minimize(max_iter=500,
             verbose=True,
             image_modifiers=[Jitter()],
             callbacks=[GifGenerator('opt_progress')])
Example #18
#    plt.imshow(img[...,0])
#plt.show()

#img = visualize_activation(model,layer_idx,filter_indices =7,max_iter=500,input_range=(0., 1.),input_modifiers=[Jitter(16)])
#plt.figure()
#plt.imshow(img)
#plt.show()

vis_images = []
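# First pass: generate an activation-maximizing image for every filter without TV regularization (tv_weight=0).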
for idx in filters:
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=idx,
                               tv_weight=0,
                               lp_norm_weight=0.1,
                               input_modifiers=[Jitter(0.05)])
    vis_images.append(img)

new_vis_image = []
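# Second pass: refine each image by seeding the optimizer with the first-pass result
# and adding TV / LP-norm regularization over a longer run (max_iter=1000).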
for i, idx in enumerate(filters):
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=idx,
                               max_iter=1000,
                               tv_weight=0.3,
                               lp_norm_weight=0.4,
                               seed_input=vis_images[i],
                               input_modifiers=[Jitter(0.05)])

    new_vis_image.append(img)
Example #19
def maxout(model, layer, filters):
    # Swap the softmax activation for a linear one
    model.layers[layer].activation = activations.linear
    model = apply_modifications(model)

    act = visualize_activation(model,
                               layer,
                               filter_indices=filters,
                               tv_weight=1.,
                               lp_norm_weight=0.,
                               verbose=True,
                               input_modifiers=[Jitter(16)])
    return act