def heatmap(self, path):
        # Grad-CAM requires the category of the image as input, so we first predict the image to get its label.
        self.img = utils.load_img(path, target_size=(224, 224))
        self.x = image.img_to_array(self.img)
        self.x = np.expand_dims(self.x, axis=0)
        self.y = self.model.predict(self.x, verbose=1)
        self.y = self.y.argmax(axis=-1)
        self.label = [
            'dyed-lifted-polyps', 'dyed-resection-margins', 'esophagitis',
            'normal-cecum', 'normal-pylorus', 'normal-z-line', 'polyps',
            'ulcerative-colitis'
        ]
        self.label = np.array(self.label)
        self.layer_idx = utils.find_layer_idx(self.model, 'dense_1')

        # Swap softmax with linear
        self.model.layers[self.layer_idx].activation = activations.linear
        self.model = utils.apply_modifications(self.model)

        for modifier in ['guided']:

            f, ax = plt.subplots(1, 1)
            plt.suptitle(self.label[self.y])

            # Inputs: model, layer index, class, and image.
            self.grads = visualize_cam(self.model,
                                       self.layer_idx,
                                       filter_indices=self.y,
                                       seed_input=self.img,
                                       backprop_modifier=modifier)
            # Let's overlay the heatmap onto the original image.
            self.jet_heatmap = np.uint8(cm.jet(self.grads)[..., :3] * 255)
            self.a = ax.imshow(overlay(self.jet_heatmap, self.img))

            plt.savefig("image/attention.jpg")
            return self.a
Example No. 2
    def vis_cam(self, x_test, scores, img_idx, fname, output_path):
        json_fname = os.path.join(output_path, 'model.json')
        layer_idx = utils.find_layer_idx(self.model, 'predictions')
        self.model.layers[layer_idx].activation = activations.linear
        self.model = utils.apply_modifications(self.model)
        classlabel = ['PA', 'Genuine']

        for class_idx in range(len(classlabel)):

            seed_input = x_test[img_idx]
            fname_original = fname.split('/')[:-6] + [
                '3',
                'origin_image',
            ] + fname.split('/')[-4:]
            fname_original = '/'.join(fname_original)
            fname_original = "{}.png".format(
                os.path.splitext(fname_original)[0])
            img = cv2.imread(fname_original, cv2.IMREAD_COLOR)[:, :, ::-1]
            output_fname = os.path.join(
                output_path,
                'vis_cam',
                classlabel[class_idx],
                os.path.relpath(fname, self.output_path).replace('../', ''),
            )
            output_fname = "{}.png".format(os.path.splitext(output_fname)[0])
            safe_create_dir(os.path.dirname(output_fname))
            grad_top = visualize_cam(self.model,
                                     layer_idx,
                                     class_idx,
                                     seed_input,
                                     penultimate_layer_idx=None,
                                     backprop_modifier=None,
                                     grad_modifier=None)
            print('-- saving visualizations in', output_fname)
            self.plot_map(grad_top, img, classlabel[class_idx],
                          scores[img_idx, class_idx], output_fname)
Example No. 3
            image_path = visualization_dir / "vis_epoch_{0:04d}.png".format(
                epoch)
            img.save(str(image_path))

    elif args.visualize == "layers":
        visualization_path /= "layers/ACGAN"

        # visualize each class
        for t in range(len(tags)):
            save_path = visualization_path / "out_{}.png".format(tags[t])
            if save_path.exists():
                # print("{} already visualized".format(tags[t]))
                continue

            output_layer_name = acgan.generator.outputs[0].name
            layer_idx = utils.find_layer_idx(acgan.generator,
                                             output_layer_name)

            # Swap softmax with linear
            acgan.generator.layers[layer_idx].activation = activations.linear
            acgan.generator = utils.apply_modifications(acgan.generator)

            img = visualize_activation(acgan.generator,
                                       layer_idx,
                                       filter_indices=t,
                                       verbose=True)
            array_to_img(img).save(save_path)
            print("saved to {}".format(str(save_path)))

        # visualize each layer
        for layer_name in [layer.name for layer in acgan.generator.layers]:
            save_path = visualization_path / "{}.png".format(layer_name)
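            # The excerpt ends here; a plausible continuation mirroring the
            # per-class branch above. A sketch, not the original code:
            if save_path.exists():
                continue
            layer_idx = utils.find_layer_idx(acgan.generator, layer_name)
            img = visualize_activation(acgan.generator, layer_idx, verbose=True)
            array_to_img(img).save(save_path)
            print("saved to {}".format(str(save_path)))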
Example No. 4
# Generate generalization metrics
score = model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')

# =============================================
# Grad-CAM code
# =============================================
from vis.visualization import visualize_cam, overlay
from vis.utils import utils
from keras import activations
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm

# Find the index of the layer to be visualized (defined above)
layer_index = utils.find_layer_idx(model, 'visualized_layer')

# Swap softmax with linear
model.layers[layer_index].activation = activations.linear
model = utils.apply_modifications(model)

# Sample indices to visualize
indices_to_visualize = [0, 12, 38, 83, 112, 74, 190]

# Visualize
for index_to_visualize in indices_to_visualize:
    input_image = input_test[index_to_visualize]
    input_class = np.argmax(target_test[index_to_visualize])
    # Matplotlib preparations
    fig, axes = plt.subplots(1, 3)
    # Generate visualization
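    # The excerpt ends here; a minimal sketch of the visualization step,
    # following the visualize_cam/overlay pattern used elsewhere on this page
    # (single-channel input and the colormap choices are assumptions):
    visualization = visualize_cam(model, layer_index,
                                  filter_indices=input_class,
                                  seed_input=input_image)
    axes[0].imshow(input_image[..., 0], cmap='gray')
    axes[0].set_title('Input')
    axes[1].imshow(visualization)
    axes[1].set_title('Grad-CAM')
    heatmap = np.uint8(cm.jet(visualization)[..., :3] * 255)
    original = np.uint8(cm.gray(input_image[..., 0])[..., :3] * 255)
    axes[2].imshow(overlay(heatmap, original))
    axes[2].set_title('Overlay')
    plt.show()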
Example No. 5
#
# We load the original model, with the top FC layers included.

# In[4]:

model_origin = InceptionV3(weights='imagenet', include_top=True)

# We replace the activation of the last layer by a linear activation, as softmax introduces dependencies between output nodes
#
# This operation may take several minutes...

# In[7]:

# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model_origin, 'predictions')
print("Remove Activation from Last Layer")
# Swap softmax with linear
model_origin.layers[layer_idx].activation = activations.linear
print("Done. Now Applying changes to the model ...")
model_origin = utils.apply_modifications(model_origin)

# We import an image and preprocess it with the preprocessing function of the InceptionV3 model. Then we compute the activation map for a specific class. The image is a husky, so we compute the activation map for the husky/eskimo-dog class, which corresponds to label 248 in the ImageNet database.

# In[8]:

#CAM on images for InceptionV3 network.
im_file = "husky.jpg"
img1 = image.load_img(im_file, target_size=(299, 299))
img1 = image.img_to_array(img1)
img1 = np.expand_dims(img1, axis=0)
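# The excerpt ends here; a minimal sketch of the steps the text above
# describes: InceptionV3 preprocessing, then Grad-CAM for the husky/eskimo-dog
# class (ImageNet label 248). visualize_cam is assumed to be imported from
# vis.visualization, and plt from matplotlib, as in the other examples:
from keras.applications.inception_v3 import preprocess_input

img1 = preprocess_input(img1)
grads = visualize_cam(model_origin, layer_idx,
                      filter_indices=248,
                      seed_input=img1)
plt.imshow(grads, cmap='jet')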
Example No. 6
sio.savemat('y_pred_cifar10_new_way_nov12.mat', {'y_pred': y_pred})
y_pred_arg = np.argmax(y_pred, axis=1)
sio.savemat('y_pred_arg_cifar10_new_way_nov12.mat', {'y_pred_arg': y_pred_arg})

from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
import numpy as np

from matplotlib import pyplot as plt
#%matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)

# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'dense_3')

# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

# This is the output node we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])

for tv_weight in [1e-3, 1e-2, 1e-1, 1, 10]:
    # Let's turn off verbose output this time to avoid clutter and just see the output.
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=filter_idx,
                               tv_weight=tv_weight,
                               verbose=False)
Example No. 7
def main():
    parser = argparse.ArgumentParser(description='test AD recognition')
    parser.add_argument('--input',
                        type=str,
                        required=True,
                        help="path to test data")
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        help="path to pre-trained model")
    parser.add_argument('--id', type=int, required=True, help="data id")
    args = parser.parse_args()

    model_dir = os.path.join(os.path.dirname(os.getcwd()), args.model)
    data = loaddata(args.input, 'testa.h5')
    print('data_shape:{}'.format(data.shape))
    print("[INFO] loading pre-trained network...")
    json_file = open(model_dir + 'AD_3dcnnmodel.json', 'r')
    model_json = json_file.read()
    json_file.close()
    model = model_from_json(model_json)
    # load weights into new model
    model.load_weights(model_dir + "AD_3dcnnmodel.hd5")
    model.summary()

    layer_idx = utils.find_layer_idx(model, 'activation_10')
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    grads = visualize_saliency(model,
                               layer_idx,
                               filter_indices=0,
                               seed_input=data[args.id],
                               backprop_modifier='guided',
                               keepdims=True)
    grads = np.squeeze(grads, axis=0)
    fig, ax = plt.subplots(nrows=1, ncols=3)
    ax[0].set_axis_off()
    ax[1].set_axis_off()
    ax[2].set_axis_off()
    extent1 = 0, grads.shape[1], 0, grads.shape[0]
    extent2 = 0, grads.shape[2], 0, grads.shape[1]
    extent3 = 0, grads.shape[2], 0, grads.shape[0]
    ax[0].imshow(data[args.id][0, :, :, 45],
                 cmap='gray',
                 interpolation='nearest',
                 extent=extent1)
    ax[0].imshow(grads[:, :, 45],
                 cmap='hot',
                 alpha=.5,
                 interpolation='bilinear',
                 extent=extent1)
    ax[0].set_title('A')
    ax[1].imshow(data[args.id][0, 39, :, :],
                 cmap='gray',
                 interpolation='nearest',
                 extent=extent2)
    ax[1].imshow(grads[39, :, :],
                 cmap='hot',
                 alpha=.5,
                 interpolation='bilinear',
                 extent=extent2)
    ax[1].set_title('C')
    ax[2].imshow(data[args.id][0, :, 60, :],
                 cmap='gray',
                 interpolation='nearest',
                 extent=extent3)
    ax[2].imshow(grads[:, 60, :],
                 cmap='hot',
                 alpha=.5,
                 interpolation='bilinear',
                 extent=extent3)
    ax[2].set_title('S')
    plt.show()
    # grads was already squeezed above, so no second squeeze is needed;
    # just reorient the volume for the animation
    volume = np.swapaxes(grads, 0, 2)
    r, c = volume.shape[1], volume.shape[2]

    # Define frames
    nb_frames = volume.shape[0]

    fig = go.Figure(frames=[
        go.Frame(
            data=go.Surface(z=((nb_frames - 1) * 0.1 - k * 0.1) *
                            np.ones((r, c)),
                            surfacecolor=np.flipud(volume[nb_frames - 1 - k]),
                            cmin=0,
                            cmax=1),
            name=str(
                k
            )  # you need to name the frame for the animation to behave properly
        ) for k in range(nb_frames)
    ])

    # Add data to be displayed before animation starts
    fig.add_trace(
        go.Surface(z=(nb_frames - 1) * 0.1 * np.ones((r, c)),
                   surfacecolor=np.flipud(volume[nb_frames - 1]),
                   colorscale='hot',
                   cmin=0,
                   cmax=1,
                   colorbar=dict(thickness=20, ticklen=4)))

    def frame_args(duration):
        return {
            "frame": {"duration": duration},
            "mode": "immediate",
            "fromcurrent": True,
            "transition": {"duration": duration, "easing": "linear"},
        }

    sliders = [{
        "pad": {"b": 10, "t": 60},
        "len": 0.9,
        "x": 0.1,
        "y": 0,
        "steps": [{
            "args": [[f.name], frame_args(0)],
            "label": str(k),
            "method": "animate",
        } for k, f in enumerate(fig.frames)],
    }]

    # Layout
    fig.update_layout(
        title='Transverse View Saliency',
        width=600,
        height=600,
        scene=dict(
            zaxis=dict(range=[-0.1, nb_frames * 0.1], autorange=False),
            aspectratio=dict(x=1, y=1, z=1),
        ),
        updatemenus=[{
            "buttons": [
                {
                    "args": [None, frame_args(50)],
                    "label": "▶",  # play symbol
                    "method": "animate",
                },
                {
                    "args": [[None], frame_args(0)],
                    "label": "◼",  # pause symbol
                    "method": "animate",
                },
            ],
            "direction": "left",
            "pad": {"r": 10, "t": 70},
            "type": "buttons",
            "x": 0.1,
            "y": 0,
        }],
        sliders=sliders)

    fig.show()
Example No. 8
def main(model_path, image_file_path):
    image_process = ImageProcess()

    image = cv2.imread(image_file_path)

    # if the collected data was not already cropped, crop it here;
    # otherwise do not crop.
    if Config.data_collection['crop'] is not True:
        image = image[Config.data_collection['image_crop_y1']:Config.
                      data_collection['image_crop_y2'],
                      Config.data_collection['image_crop_x1']:Config.
                      data_collection['image_crop_x2']]

    image = cv2.resize(image, (Config.neural_net['input_image_width'],
                               Config.neural_net['input_image_height']))
    image = image_process.process(image)

    drive_run = DriveRun(model_path)
    measurement = drive_run.run((image, ))
    """ grad modifier doesn't work somehow
    fig, axs = plt.subplots(1, 3)
    fig.suptitle('Saliency Visualization' + str(measurement))
    titles = ['left steering', 'right steering', 'maintain steering']
    modifiers = [None, 'negate', 'small_values']

    for i, modifier in enumerate(modifiers):
        layer_idx = utils.find_layer_idx(drive_run.net_model.model, 'conv2d_last')
        heatmap = visualize_cam(drive_run.net_model.model, layer_idx, 
                    filter_indices=None, seed_input=image, backprop_modifier='guided', 
                    grad_modifier=modifier)

        axs[i].set(title = titles[i])
        axs[i].imshow(image)
        axs[i].imshow(heatmap, cmap='jet', alpha=0.3)
    """
    plt.figure()
    #plt.title('Saliency Visualization' + str(measurement))
    plt.title('Steering Angle Prediction: ' + str(measurement[0][0]))
    layer_idx = utils.find_layer_idx(drive_run.net_model.model, 'conv2d_last')
    heatmap = visualize_cam(drive_run.net_model.model,
                            layer_idx,
                            filter_indices=None,
                            seed_input=image,
                            backprop_modifier='guided')

    plt.imshow(image)
    plt.imshow(heatmap, cmap='jet', alpha=0.5)

    # file name
    loc_slash = image_file_path.rfind('/')
    if loc_slash != -1:  # there is '/' in the data path
        image_file_name = image_file_path[loc_slash + 1:]
    else:
        image_file_name = image_file_path

    saliency_file_path = model_path + '_' + image_file_name + '_saliency.png'
    saliency_file_path_pdf = model_path + '_' + image_file_name + '_saliency.pdf'

    plt.tight_layout()
    # save fig
    plt.savefig(saliency_file_path, dpi=150)
    plt.savefig(saliency_file_path_pdf, dpi=150)

    print('Saved ' + saliency_file_path + ' & .pdf')
def vis(model, args):
    from vis.utils import utils
    from keras import activations

    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    # Anyway, we are interested in the last layer, where the prediction happens
    layer_idx = utils.find_layer_idx(model, 'predictions')

    # To visualize activations over the final dense layer outputs, we need to
    # switch the softmax activation out for linear, since the gradient of an
    # output node depends on all the other node activations.
    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    layer_idx = args.layer_idx

    #We define the softmax function to translate the output of the CNN into a probability for each class.
    def softmax(x):
        """
        Compute softmax values for each set of scores in x.
        
        Rows are scores for each class. 
        Columns are predictions (samples).
        """
        scoreMatExp = np.exp(np.asarray(x))
        return scoreMatExp / scoreMatExp.sum(0)

    def predictImage(args):
        from os.path import basename
        load_count = args.cpc
        (x_test, y_test, imgfiles) = load_xray_test(args, load_count)
        images = []
        outs = []

        if not args.noshowpredict:
            #plt.figure()
            f, ax = plt.subplots(nb_classes, load_count, figsize=(15, 15))
            ax = ax.reshape((nb_classes * load_count))
            plt.suptitle('predicted classes')

        i = 0
        for im, real_y, fn in zip(x_test, y_test, imgfiles):
            images.append(im)
            out = softmax(
                model.predict(
                    im.reshape(-1, args.input_size, args.input_size,
                               3).astype('float32') / 255.)[0])
            print(out)
            print(fn)
            outs.append(out)
            classKey = np.argmax(out)

            # Confidence the model assigns to the predicted class.
            certainty = out[classKey]

            # green to gray
            #from skimage.color import rgb2gray
            #im=rgb2gray(im)

            if not args.noshowpredict:
                if len(y_test) > 1:
                    ax[i].imshow(im / 255.)
                    ax[i].set_title(
                        basename(fn) + " pred: " + str(classKey) + '(' +
                        str(round(certainty * 100, 3)) + '%)')
                    i += 1
                else:
                    ax.imshow(im / 255.)
                    ax.set_title(
                        basename(fn) + " pred: " + str(classKey) + '(' +
                        str(round(certainty * 100, 3)) + '%)')
        return images, outs

    images, outs = predictImage(args)
    if not args.noshowpredict:
        plt.show()
    if args.vis == "act" or args.vis == "all":
        show_activation(model, layer_idx)
    elif args.vis == "sal" or args.vis == "all":
        show_saliency(model, layer_idx, images, outs)
    elif args.vis == "cam" or args.vis == "all":
        show_cam(model, layer_idx, images, outs)
    elif args.vis == "salcam" or args.vis == "all":
        sal = show_saliency(model, layer_idx, images, outs)
        cam = show_cam(model, layer_idx, images, outs)
        show_salcam(sal, cam, images, outs)
Example No. 10
    # Utility to overlay text on image.
    img = utils.draw_text(img, 'Filter {}'.format(idx))
    new_vis_images.append(img)

# Generate stitched image palette with 5 cols so we get 2 rows.
stitched = utils.stitch_images(new_vis_images, cols=5)
plt.figure()
plt.axis('off')
plt.imshow(stitched)
plt.show()

from vis.visualization import get_num_filters

selected_indices = []
for layer_name in ['block_6_conv_3']:
    layer_idx = utils.find_layer_idx(model, layer_name)

    # Visualize all filters in this layer.
    filters = np.random.permutation(get_num_filters(
        model.layers[layer_idx]))[:10]
    selected_indices.append(filters)

    # Generate input image for each filter.
    vis_images = []
    for idx in filters:
        img = visualize_activation(model, layer_idx, filter_indices=idx)

        # Utility to overlay text on image.
        img = utils.draw_text(img, 'Filter {}'.format(idx))
        vis_images.append(img)
Example No. 11
import math
from scipy.ndimage import convolve

import warnings

warnings.filterwarnings('ignore')

# In[2]:

model = load_model('1/weights-improvement-47-0.78.h5')

model.summary()

# In[3]:

layer_idx = utils.find_layer_idx(model, 'dense_1')
model.layers[layer_idx].activation = activations.linear
modelnew = utils.apply_modifications(model)
penultimate_layer = utils.find_layer_idx(modelnew, 'add_12')

# In[90]:

img1 = utils.load_img('1344_1_D_CC_R.png', target_size=(450, 450))
mean = img1.mean()
std = img1.std()
img1 = (img1 - mean) / std
print(img1.shape)

scipy.misc.imsave('1344_1_D_CC_R_450.png', img1)
img1_resized = img1[np.newaxis, ...]
img1_resized = img1_resized[..., np.newaxis]
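# The excerpt ends here; a plausible next step given the variables prepared
# above (modelnew, layer_idx, penultimate_layer, img1_resized). A sketch,
# not the original code; filter_indices=0 is an assumption:
from matplotlib import pyplot as plt
from vis.visualization import visualize_cam

grads = visualize_cam(modelnew, layer_idx,
                      filter_indices=0,
                      seed_input=img1_resized,
                      penultimate_layer_idx=penultimate_layer)
plt.imshow(grads, cmap='jet')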
Example No. 12
def get_importance(tab_dir,
                   fig_dir,
                   model_info,
                   label_node,
                   label,
                   x_sample,
                   softmax=False,
                   save=True,
                   fn=None,
                   in_min=0.,
                   in_max=1.,
                   act_max_weight=1,
                   tv_weight=10.,
                   lp_norm_weight=10.,
                   max_iter=200,
                   backprop_modifier=None,
                   grad_modifier='absolute'):
    """

    Args:
        tab_dir: dir to save tabular data
        fig_dir: dir to save fig data
        model_info: output of get_model_data()
        label_node:
        label: name of label for output files
        softmax: was softmax used?
        save: save info?
        in_min:
        in_max:
        act_max_weight:
        tv_weight:
        lp_norm_weight:
        max_iter:
        x_sample:
        backprop_modifier:
        grad_modifier:

        for others see visualize_activation() from keras-vis

    Returns: dataframe, rows are genes, col is importance

    """

    # model
    # classes = model_info['le'].classes_
    #
    # print('original class names: ', classes)  # check classes
    # print('transformed class names: ', model_info['le'].transform(classes))
    model = model_info.get('nn')
    layer_idx = utils.find_layer_idx(model, 'preds')  # layer of interest
    if softmax:
        model.layers[
            layer_idx].activation = activations.linear  # swap softmax with linear
        model = utils.apply_modifications(model)

    filter_idx = label_node  # node of interest: the output node we want to maximize.

    print('getting saliency')
    # print(x_sample.shape)
    sals = visualize_saliency(model=model,
                              layer_idx=layer_idx,
                              filter_indices=filter_idx,
                              seed_input=x_sample,
                              backprop_modifier=backprop_modifier,
                              grad_modifier=grad_modifier)

    sals = sals.reshape(len(model_info.get('gene_ids')))
    sals = pd.DataFrame(data=sals,
                        index=model_info.get('gene_ids'),
                        columns=['activations'])

    if save:
        sals.to_csv(os.path.join(tab_dir, fn + '.csv'))  # save activations
        a = sals.values
        plt.figure()
        plt.hist(a)
        plt.savefig(os.path.join(fig_dir, fn + '.tiff'))

    return sals
Example No. 13
batch_x = H5_file['data'][:]
H5_file.close()
batch_x = np.transpose(batch_x, (1, 2, 0))
data_input_c[0, :, :, :, 0] = batch_x[:, :, :]


# Save or load weights
# pre = model.predict_on_batch(data_input_c)
# model.save('G:\qweqweqweqwe\model.h5')
model.load_weights('G:\qweqweqweqwe\model.h5')

# Take a look at the computation graph
print(model.summary())

# Find the index of the scoring layer for saliency: the dependent variable
# when taking gradients, usually the last fully connected layer
layer_idx = utils.find_layer_idx(model, 'predictions')
# Find the index of the layer to visualize: the layer holding the weighted
# feature maps, i.e. the independent variable when taking gradients, usually
# the last convolutional or pooling layer
layer_idx_conv = utils.find_layer_idx(model, 'block2_pool')


# As usual, first swap the scoring layer's softmax activation for a linear one
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

# Start CAM
# Activation with the CAM method ======================================================================================
# visualize_cam() usage:
# model: the Keras model you want to visualize
# layer_index: The layer index within `model.layers` whose filters needs to be visualized.
#              That is the official docstring; as I understand it, this layer should normally
#              be the last fully connected layer, the one just before the final softmax. Its
#              neuron count equals the number of classes, and each neuron's output is the
#              score for the corresponding class (the scores turn into probabilities after
#              the softmax)
Example No. 14
]

xloc = np.arange(25)
yloc = np.arange(23)
yloc = yloc[::-1]
for i in range(len(dtints)):
    dtints[i] = round(dtints[i], 3)
yloc = yloc - 0.5
xloc = xloc - 0.5

thres = np.where(prob >= 0.8)
ind_thres = thres[0]
class_thres = thres[1]

#plt.rcParams['figure.figsize'] = (18, 6)
layer_idx = utils.find_layer_idx(model, 'preds')
penultimate_layer_idx = utils.find_layer_idx(model, 'conv2d2')

# os.mkdir("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/keras-vis/grad_CAM/dmdt/")
# os.mkdir("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/keras-vis/grad_CAM/gradcam/")
###os.mkdir("keras-vis/")
###os.mkdir("keras-vis/grad_CAM/")
###os.mkdir("keras-vis/grad_CAM/dmdt/")
###os.mkdir("keras-vis/grad_CAM/gradcam/")

###os.mkdir("keras-vis/saliency/")

for i in range(len(classes)):
    # This is the output node we want to maximize.
    filter_idx = filterid[classes[i]]
    ###os.mkdir("keras-vis/grad_CAM/dmdt/"+pd[classes[i]]+"/")
Example No. 15
from keras.applications import VGG16
from vis.utils import utils
from keras import activations
from vis.visualization import visualize_activation
from matplotlib import pyplot as plt
from vis.input_modifiers import Jitter
import numpy as np
import os
import cv2

categorias = np.random.permutation(1000)[:15]

model = VGG16(weights='imagenet', include_top=True)
layer_idx = utils.find_layer_idx(
    model, 'predictions')  # -1 also works (it's the last layer)

# Swap softmax with linear

model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

if not os.path.isdir("fc"):
    os.mkdir("fc")

for categoria in categorias:
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=categoria,
                               max_iter=500,
                               input_modifiers=[Jitter(16)])
    img = utils.draw_text(img, utils.get_imagenet_label(categoria))
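    # The excerpt ends here; a plausible save step into the "fc" directory
    # created above (cv2 and os are already imported). A sketch, not the
    # original code:
    cv2.imwrite(os.path.join("fc", "{}.png".format(categoria)),
                cv2.cvtColor(img, cv2.COLOR_RGB2BGR))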
Example No. 16
# score = model.evaluate(x_test, y_test, verbose=0)
# print('Test loss:', score)
# print('Test accuracy:', score)

class_idx = 0
indices = np.where(y_test[:, class_idx] == 1.)[0]

# pick some random input from here.
idx = indices[0]

# Let's sanity-check the picked image.
cv2.imwrite("test/raw.jpg", x_test[idx][0] * 255.)
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'predictions')

# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

penultimate_layer = utils.find_layer_idx(model, 'block5_conv3')

for modifier in [None, 'guided', 'relu']:
    plt.suptitle("vanilla" if modifier is None else modifier)
    for i, img in enumerate([x_test[indices[0]], x_test[indices[1]]]):
        grads = visualize_cam(model,
                              layer_idx,
                              filter_indices=0,
                              seed_input=img,
                              penultimate_layer_idx=penultimate_layer,
                              backprop_modifier=modifier)
Example No. 17
IMG_DIR = r'/content/result_images'  # img-directory for saving results
'''lower this in case you run into problems (L.75-78)'''
analyzer_batchsize = 32
'''don't change the following unless you have a custom CNN structure'''
name_softmax_layer = 'predictions'  # used for vis-functions
name_last_conv_layer = 'block2_conv2'  # used for gradcam-function
layerlist_for_layered_actmax = [
    'block1_conv1', 'block1_conv2', 'block1_pool', 'block2_conv1',
    'block2_conv2', 'block2_pool', 'fc1', 'fc2'
]  # used for layered actmax-function
'''settable parameters end'''
'''loading and reshaping data'''
print('loading data...')
model = keras.models.load_model(model_PATH)
linear_model = keras.models.load_model(model_PATH)
linear_model.layers[utils.find_layer_idx(
    linear_model, 'predictions')].activation = keras.activations.linear
linear_model = utils.apply_modifications(linear_model)

X_train = np.load(X_train_PATH)
X_train = X_train / 255
y_train = np.load(y_train_PATH)

X_test = np.load(X_test_PATH)
X_test = X_test / 255
y_test = np.load(y_test_PATH)
print('loaded data...')

y_test = keras.utils.to_categorical(
    y_test)  # reshapes labels to categorical format
IMG_SIZE = X_test.shape[
    1]  # set the image dimension used for plotting to match the data
def saliency_adv(model, data, labels, y_attack, eps_num, adv_all_eps):

    from vis.utils import utils
    from vis.visualization import visualize_saliency
    import scipy.ndimage as ndimage
    import os

    adv_all_eps = np.asarray(adv_all_eps, dtype='float32')

    cwd = os.getcwd()

    adversarial_data_predictions = model.predict(adv_all_eps)
    #print('predictions on adversarial data')
    #print(adversarial_data_predictions)

    clean_data_predictions = model.predict(data)
    #print('predictions on clean data')
    #print(clean_data_predictions)

    #most_likely_label = outputs.argmax()
    most_likely_label_adv = np.argmax(adversarial_data_predictions, axis=1)

    most_likely_label_clean = np.argmax(clean_data_predictions, axis=1)

    # get index of classification layer
    #model.layers[-1].activation = keras.activations.linear
    #layer_index = utils.find_layer_idx(model, model.layers[-1])
    layer_index = utils.find_layer_idx(model, 'dense_1')

    # Swap softmax with linear
    model.layers[layer_index].activation = keras.activations.linear
    model = utils.apply_modifications(model)

    for i in range(len(eps_num)):
        print('epsilon values in saliency function: {}'.format(eps_num))
        adv_single_eps = adv_all_eps[i * test_size:test_size * (i + 1)]
        attack_labels_single_eps = most_likely_label_adv[i *
                                                         test_size:test_size *
                                                         (i + 1)]
        adv_pred_proba = adversarial_data_predictions[i * test_size:test_size *
                                                      (i + 1)]

        test_idx = [0, 150]
        for idx in test_idx:
            #for idx in range(300):

            attn_map_clean = visualize_saliency(
                model,
                layer_index,
                filter_indices=most_likely_label_clean[idx],
                #filter_indices = None,
                seed_input=data[idx])

            gaussian_attn_map_clean = ndimage.gaussian_filter(attn_map_clean,
                                                              sigma=5)

            attn_map_adv = visualize_saliency(
                model,
                layer_index,
                filter_indices=attack_labels_single_eps[idx],
                #filter_indices = None,
                seed_input=adv_single_eps[idx])

            gaussian_attn_map_adv = ndimage.gaussian_filter(attn_map_adv,
                                                            sigma=5)

            if (attack_labels_single_eps[idx] == 0):
                predicted_class_name_adv = 'Normal'
            elif (attack_labels_single_eps[idx] == 1):
                predicted_class_name_adv = 'Pneumonia'

            if (most_likely_label_clean[idx] == 0):
                predicted_class_name_clean = 'Normal'
            elif (most_likely_label_clean[idx] == 1):
                predicted_class_name_clean = 'Pneumonia'

            if (labels[idx] == 0):
                true_class_name = 'Normal'
            elif (labels[idx] == 1):
                true_class_name = 'Pneumonia'

            if (y_attack[idx] == 0):
                target_attack_class = 'Normal'
            elif (y_attack[idx] == 1):
                target_attack_class = 'Pneumonia'

            adversarial_highest_score = adv_pred_proba[idx].max()
            print(adv_pred_proba[idx])
            adversarial_highest_score = adversarial_highest_score * 100
            print('highest predicted score on adversarial data for index {}'.
                  format(idx))
            adversarial_highest_score = round(adversarial_highest_score, 1)
            print(adversarial_highest_score)

            clean_highest_score = clean_data_predictions[idx].max()
            print(clean_data_predictions[idx])
            clean_highest_score = clean_highest_score * 100
            print(
                'highest predicted score on clean data for index {}'.
                format(idx))
            clean_highest_score = round(clean_highest_score, 1)
            print(clean_highest_score)

            if idx in (0, 150):

                fig = plt.figure(figsize=(12, 12))
                fig.suptitle('True Class: {}\nEpsilon: {}'.format(
                    true_class_name, eps_num[i]),
                             size=25)
                gs1 = gridspec.GridSpec(2, 2)
                gs1.update(wspace=0.1, hspace=0.1)

                ax1 = plt.subplot(gs1[0, 0])
                ax1.set_title('Clean: {}'.format(predicted_class_name_clean),
                              size=25)
                ax1.imshow(data[idx])
                ax1.text(0.04,
                         0.05,
                         '{}%'.format(clean_highest_score),
                         color="white",
                         size=25,
                         bbox=dict(facecolor='green', alpha=0.8),
                         transform=ax1.transAxes,
                         horizontalalignment='left')

                ax2 = plt.subplot(gs1[0, 1])
                ax2.set_title('Clean Saliency', size=25)
                ax2.imshow(data[idx])
                ax2.imshow(gaussian_attn_map_clean, cmap="jet", alpha=.7)

                ax3 = plt.subplot(gs1[1, 0])
                ax3.set_title(
                    'Adversarial: {}'.format(predicted_class_name_adv),
                    size=25)
                ax3.imshow(adv_single_eps[idx])
                ax3.text(0.04,
                         0.05,
                         '{}%'.format(adversarial_highest_score),
                         color="white",
                         size=25,
                         bbox=dict(facecolor='red', alpha=0.8),
                         transform=ax3.transAxes,
                         horizontalalignment='left')
                #axes[1,0].text(0,0, '{}%'.format(predicted_accuracy), bbox=dict(facecolor='red', alpha=0.7), fontsize=10, horizontalalignment='center', verticalalignment='center', transform = axes[1,0].transAxes)

                ax4 = plt.subplot(gs1[1, 1])
                ax4.set_title('Adversarial Saliency', size=25)
                ax4.imshow(adv_single_eps[idx])
                ax4.imshow(gaussian_attn_map_adv, cmap="jet", alpha=.7)

                ax1.axis('off')
                ax2.axis('off')
                ax3.axis('off')
                ax4.axis('off')

                plt.show()

                fig.savefig(cwd +
                            "/pgd_saliency/pdf/epsilon_{}_index_{}.pdf".format(
                                eps_num[i], idx))
                plt.clf()

    return
Example No. 19
# # Also, if Jitter's argument is greater than 1 it is a number of pixels (or voxels); if it is between 0 and 1 it is a fraction of the image size (multiplied by width and height)
# max_activition_better = visualize_activation(model, layer_idx_fc, filter_indices=1, max_iter=5000, input_modifiers=[Jitter(8)], verbose=True)
# max_activition_better = np.squeeze(max_activition_better, axis=-1)
# for ii in range(16):
#     plt.figure()
#     max_activition_piece = max_activition_better[:, :, ii]
#     plt.imshow(max_activition_piece, cmap=plt.cm.jet)
#     plt.show()
#
#
# # We can also specify several classes at once; the maximally activating input then looks like a chimera, something assembled from objects of several classes, which is quite fun
# max_activition_better = visualize_activation(model, layer_idx_fc, filter_indices=[0,1], max_iter=500, input_modifiers=[Jitter(16)], verbose=True)
#
#

layer_idx_conv = utils.find_layer_idx(model, 'predictions')
model.layers[layer_idx_conv].activation = activations.linear
model = utils.apply_modifications(model)
filter_indices = 392
# Maximally activating input for a convolution filter ==================================================================
# Look at the maximally activating input of the 2nd filter (filter_indices=1) of the first conv layer; this input represents the (humanly interpretable) feature that the filter attends to and extracts
# data_input_c[:,:,:,0]=max_activition_norm
# max_activition_norm = visualize_activation(model, layer_idx_conv, filter_indices=filter_indices, max_iter=1000, input_modifiers=[Jitter(9)],  verbose=True,seed_input=data_input_c)
max_activition_norm = visualize_activation(model,
                                           layer_idx_conv,
                                           filter_indices=filter_indices,
                                           max_iter=5000,
                                           input_modifiers=[Jitter(16)],
                                           verbose=True)
# max_activition_norm = np.squeeze(max_activition_norm, axis=-1)
for ii in range(1):
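    # The excerpt ends mid-loop; a plausible body following the commented
    # slice-display pattern earlier in this example. A sketch, not the
    # original code:
    plt.figure()
    plt.imshow(np.squeeze(max_activition_norm)[:, :, ii], cmap=plt.cm.jet)
    plt.show()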
def _main():
    # Load settings or use args or defaults
    s = load_settings_or_use_args(FLAGS)
    img_mode = s['img_mode']

    all_imgs = []

    # load image
    img = cv2.imread(img_name, cv2.IMREAD_COLOR)
    img = cv2.resize(img, out_size, interpolation=cv2.INTER_AREA)
    img_s = cv2.resize(img.copy(), (s['img_width'], s['img_height']), interpolation=cv2.INTER_AREA)
    #img_s = util.central_image_crop(img_s, s['crop_img_height'], s['crop_img_width'])

    # prep array for input
    img_out = np.asarray(img_s, dtype=np.float32) * np.float32(1.0 / 255.0)
    if img_mode == "rgb":
        carry = np.array(img_out)[np.newaxis, ...].astype(np.float32)
    else:
        img_out = cv2.cvtColor(img_out, cv2.COLOR_BGR2GRAY)
        carry = np.array(img_out)[np.newaxis, :, :, np.newaxis]

    print(carry.shape)

    # Load json and create model
    json_model_path = os.path.join(FLAGS.experiment_rootdir, s['model_dir'], s['json_model_fname'])

    # Get weights paths
    weights_2_load = sorted(glob.glob(os.path.join(FLAGS.experiment_rootdir, s['model_dir'], 'model_weights*')), key=os.path.getmtime)

    # prep output directory
    outfolder = os.path.join(FLAGS.experiment_rootdir, 'vis_epochs')
    del_and_recreate_folder(outfolder)

    # iterate through trained models
    for i, weights_load_path in enumerate(weights_2_load):
        k.clear_session()
        model = get_model(json_model_path, weights_load_path)
        penultimate_layer = vutils.find_layer_idx(model, 'conv2d_9')

        img_out, dur = get_grad_cam(img, carry, model, penultimate_layer)

        # make and store predictions
        theta, p_t, img_out = make_prediction(img_out, carry, model)

        # output the results
        stats = ["Epoch: {:d}".format(i), "Predictions:", "[C: {:4.3f}] [SA: {:4.3f}]".format(float(p_t), float(theta))]

        # place output on image
        for idx, stat in enumerate(stats):
            text = stat.lstrip()
            cv2.putText(img_out, text, (0, 30 + (idx * 30)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 255, 255), 2, lineType=30)

        # store img
        all_imgs.append(img_out.astype(np.uint8))

        # show output
        cv2.imshow('frame', img_out)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

    # prep output video
    out_name = os.path.splitext(os.path.split(img_name)[1])[0] + ".mp4"
    output_name = os.path.join(outfolder, out_name)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video = cv2.VideoWriter(output_name, fourcc, 1, (out_size[0] + 20, out_size[1]))

    # write video
    for img in all_imgs:
        video.write(img)
    video.release()
val_loss_values = history_dict['val_loss']
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
f = open(train_info_record + 'AMP_acc_result.txt', 'a')
epochs = range(1, len(history_dict['acc']) + 1)
for i in range(0, len(history_dict['acc'])):
    f.write(
        'epoch:{}, train_loss:{:.4f}, train_acc:{:.2f}, val_loss:{:.4f}, val_acc:{:.2f}'
        .format(i + 1, loss_values[i], acc_values[i], val_loss_values[i],
                val_acc_values[i]) + '\n')
_, acc = model.evaluate(X_val, Y_val)
print(acc)

# saliency map
trained_model = load_model(train_info_record + "save_best_model")
layer_idx = utils.find_layer_idx(trained_model, 'prediction')
trained_model.layers[layer_idx].activation = activations.linear
new_model = utils.apply_modifications(trained_model)

indices = np.where(Y_test[:, 0] == 1.)[0]
idx = indices[0]

grads = visualize_saliency(
    new_model,
    layer_idx,
    filter_indices=0,
    seed_input=X_test[idx],
    backprop_modifier="guided",
)
print("1")
penultimate_layer = utils.find_layer_idx(new_model, 'conv3')
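# The excerpt ends here; a plausible Grad-CAM step with the variables prepared
# above. A sketch, assuming visualize_cam is imported from vis.visualization
# alongside visualize_saliency:
cam = visualize_cam(new_model,
                    layer_idx,
                    filter_indices=0,
                    seed_input=X_test[idx],
                    penultimate_layer_idx=penultimate_layer)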
Example No. 22
batch_x = np.transpose(batch_x, (1, 2, 0))
data_input_c[0, :, :, :, 0] = batch_x[:, :, :]

# Save or load weights
# pre = model.predict_on_batch(data_input_c)
# model.save('G:\qweqweqweqwe\model.h5')
model.load_weights(
    filepath='/data/XS_Aug_model_result/model_templete/qianyi/model_qianyi_new/2LEVEL/m_80000_model.h5',
    by_name=True)

# Take a look at the computation graph
print(model.summary())

# Find the index of the scoring layer for saliency: the dependent variable
# when taking gradients, usually the last fully connected layer
layer_idx = utils.find_layer_idx(model, 'fc1')
# Find the index of the layer to visualize: the layer holding the weighted
# feature maps, i.e. the independent variable when taking gradients, usually
# the last convolutional or pooling layer
layer_idx_conv = utils.find_layer_idx(model, 'L4_block3conv3')

# As usual, first swap the scoring layer's softmax activation for a linear one
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

# Start CAM
# Activation with the CAM method ======================================================================================
# visualize_cam() usage:
# model: the Keras model you want to visualize
# layer_index: The layer index within `model.layers` whose filters needs to be visualized.
#              That is the official docstring; as I understand it, this layer should normally
#              be the last fully connected layer, the one just before the final softmax. Its
#              neuron count equals the number of classes, and each neuron's output is the
#              score for the corresponding class (the scores turn into probabilities after
#              the softmax). In practice a convolutional layer can apparently be specified
#              too; that would need a closer look at the code.
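# A minimal sketch of the visualize_cam() call that the comment block above
# leads up to, using the indices found earlier (filter_indices=0 and the use
# of data_input_c as seed input are assumptions):
grads = visualize_cam(model, layer_idx,
                      filter_indices=0,
                      seed_input=data_input_c,
                      penultimate_layer_idx=layer_idx_conv)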
plt.figure(figsize=(20, 20))
cnf_matrix = confusion_matrix(validation_generator.classes, y_pred)

plt.imshow(cnf_matrix, interpolation='nearest')
plt.colorbar()
tick_marks = np.arange(len(classes))
_ = plt.xticks(tick_marks, classes, rotation=90)
_ = plt.yticks(tick_marks, classes)

# In[]
from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
from matplotlib import pyplot as plt

plt.rcParams['figure.figsize'] = (18, 6)

# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'preds')

# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

# This is the output node we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])
Example No. 24
    def get_conv_layer(self, conv_layer_name):
        conv_layer = utils.find_layer_idx(self.model, conv_layer_name)
        return conv_layer
Example No. 25
import keras
import numpy as np
import cv2
import sys
import os
from vis.utils import utils
from vis.visualization import get_num_filters

if not os.path.exists("single_neuron/block1_conv1"):
    os.makedirs("single_neuron/block1_conv1")

model = keras.applications.VGG16()

layer_idx = utils.find_layer_idx(model, 'block1_conv1')

num_filters = get_num_filters(model.layers[layer_idx])

print(model.layers[layer_idx].get_weights()[0].shape)

max_v = np.amax(model.layers[layer_idx].get_weights()[0])
min_v = np.amin(model.layers[layer_idx].get_weights()[0])

print(max_v)
print(min_v)

pesos = model.layers[layer_idx].get_weights()[0].copy()
if min_v < 0:
    pesos = pesos + abs(min_v)
    max_v = max_v + abs(min_v)

pesos = pesos * (255.0 / max_v)
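# The excerpt ends here; a plausible save loop writing one image per rescaled
# first-layer kernel into the directory created above. A sketch, not the
# original code (the 128x128 nearest-neighbour upscale is an assumption, just
# to make the 3x3 kernels visible):
for idx in range(num_filters):
    kernel = pesos[:, :, :, idx].astype(np.uint8)  # 3x3x3 kernel, scaled to 0-255
    kernel = cv2.resize(kernel, (128, 128), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite("single_neuron/block1_conv1/filter_{}.png".format(idx), kernel)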
Example No. 26
from keras.applications import ResNet50
from vis.utils import utils
from keras import activations

# Build the ResNet50 network with ImageNet weights
model = ResNet50(weights='imagenet', include_top=True)

# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'fc1000')

# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

from vis.utils import utils
from matplotlib import pyplot as plt

plt.rcParams['figure.figsize'] = (18, 6)

img1 = utils.load_img('../vggnet/images/ouzel1.jpg', target_size=(224, 224))
img2 = utils.load_img('../vggnet/images/ouzel2.jpg', target_size=(224, 224))

f, ax = plt.subplots(1, 2)
ax[0].imshow(img1)
ax[1].imshow(img2)

from vis.visualization import visualize_saliency, overlay
from vis.utils import utils
from keras import activations
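# The excerpt ends here; a minimal sketch of the saliency step these imports
# set up (class index 20, 'water ouzel' in ImageNet, is an assumption):
f, ax = plt.subplots(1, 2)
for i, img in enumerate([img1, img2]):
    grads = visualize_saliency(model, layer_idx,
                               filter_indices=20,
                               seed_input=img)
    ax[i].imshow(grads, cmap='jet')
plt.show()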
Example No. 27
topK_ndx = []
imagenet_ndx = []  # indexes into the softmax entries of final layer
for i, name in enumerate(class_names):
    ndx = topK_names.index(name)
    topK_ndx.append(ndx)
    imagenet_ndx.append(np.argwhere(preds[0] == topK_scores[ndx])[0][0])
# 282 = Tiger cat, 242 = Boxer (0 indexed)

img = utils.load_img(img_path, target_size=(224, 224))
N = len(class_names)
fig, ax = plt.subplots(1, N + 1)
ax[0].imshow(img)
ax[0].axis('off')

# Let's overlay the heatmap for each desired class onto the original image.
for i in range(N):
    ndx = imagenet_ndx[i]
    layer_idx = utils.find_layer_idx(model, 'predictions')  # final layer
    grads = visualize_cam(model,
                          layer_idx,
                          filter_indices=ndx,
                          seed_input=img,
                          backprop_modifier='guided')
    jet_heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)
    ax[i + 1].imshow(overlay(jet_heatmap, img))
    ax[i + 1].axis('off')
    ax[i + 1].set_title(class_names[i])
# save before show(); otherwise the saved figure may be blank
plt.savefig(os.path.join('figures', 'grad-cam-keras.pdf'))
plt.show()
Example No. 28
import random

import numpy as np
from matplotlib import pyplot as plt
from vis.utils import utils
from vis.visualization import visualize_cam, overlay


def plot_heatmap(arr, figsize=(15, 15)):
    n = arr.shape[0]
    fig, ax = plt.subplots(1, n, figsize=figsize)
    ax = ax.ravel()
    for i in range(n):
        ax[i].imshow(arr[i])
        ax[i].set_xticks([])
        ax[i].set_yticks([])


def attention_dia(idx):
    pre_img = x_train[idx]
    filter_idx = np.argmax(my_model.predict(pre_img[np.newaxis]))
    heatmap = visualize_cam(my_model, layer_idx, filter_indices=filter_idx, seed_input=pre_img, backprop_modifier='guided')
    
    return overlay(churches[idx], heatmap)


num_idx = 5
layer_idx = utils.find_layer_idx(my_model, 'predictions')
dia_idx = random.sample(range(100), 10)
dia_attention_maps = np.array(list(map(attention_dia, dia_idx)))  # list() so this also works on Python 3

plot_heatmap(dia_attention_maps)
Example No. 29
from matplotlib import pyplot as plt
from vis.input_modifiers import Jitter

model = load_model('my_cifar10_ep10.h5')
model.summary()
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
#layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear

plt.rcParams['figure.figsize'] = (50, 50)

# The name of the layer we want to visualize
# You can see this in the model definition.
layer_name = 'preds'
layer_idx = utils.find_layer_idx(model, layer_name)
# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# Visualize all filters in this layer.
filters = np.arange(get_num_filters(model.layers[layer_idx]))

#model.layers[layer_idx].activation = activations.linear
#model = utils.apply_modifications(model)

#This is the output node we want to maximize.
#Generate input image for each filter.
#for output_idx in filters[0:10]:
#    # Lets turn off verbose output this time to avoid clutter and just see the output.
#    img = visualize_activation(model, layer_idx, filter_indices=output_idx, input_range=(0., 1.),input_modifiers=[Jitter(16)])
#    plt.figure()
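# A runnable version of the commented-out loop above; a sketch, assuming
# visualize_activation is imported from vis.visualization as in the other
# examples:
for output_idx in filters[0:10]:
    img = visualize_activation(model, layer_idx,
                               filter_indices=output_idx,
                               input_range=(0., 1.),
                               input_modifiers=[Jitter(16)])
    plt.figure()
    plt.imshow(img)
plt.show()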
Example No. 30
# 1. read the model & pretrained weights
model = read_model("saved/final_model.txt", "saved/final_weights.txt")

# 2. print summary and plot model architecture
print(model.summary())
plot_model(model, to_file='model.png')

# 3. get some data to visualize through activations & normalize
(X_train, y_train), (X_test, y_test_orig) = cifar10.load_data()
X_test = X_test.astype('float32')
X_test = X_test / 255.0
y_test = np_utils.to_categorical(y_test_orig)

# 4. visualize
conv_layer_idx = utils.find_layer_idx(model, "conv2d_1")
conv_layer_idx2 = utils.find_layer_idx(model, "conv2d_2")
final_layer_idx = utils.find_layer_idx(model, "dense_2")

airplanes = get_class_data(X_test, y_test_orig, 0)
ships = get_class_data(X_test, y_test_orig, 8)
inputs = X_test[0:50]
input = ships[0:1]

# first conv layer
v2 = visualize_activations(inputs, model, conv_layer_idx, 32, 32)
v1 = visualize_activations(input, model, conv_layer_idx, 32, 32)

# second conv layer
v4 = visualize_activations(inputs, model, conv_layer_idx2, 10, 16)
v3 = visualize_activations(input, model, conv_layer_idx2, 10, 16)