Example #1
def get_saliency(model, negate=False):

    # Swap the layer's softmax for a linear activation so gradients do not
    # saturate, then rebuild the graph with keras-vis's apply_modifications.
    layer_idx = 17
    model.layers[layer_idx].activation = activations.linear
    model = ut.apply_modifications(model)

    # Note: these are training images of the 'ship' class.
    num_samples = 80
    images = utils.get_train_images_by_category(utils.Labels.ship,
                                                num_samples)

    # Accumulate saliency maps over all sampled 32x32 images.
    grads_sum = np.zeros((32, 32))
    for img in images:
        if negate:
            # Difference between vanilla and guided saliency for 'automobile'.
            grads = (visualize_saliency(model,
                                        layer_idx,
                                        filter_indices=utils.Labels.automobile,
                                        seed_input=img)
                     - visualize_saliency(model,
                                          layer_idx,
                                          filter_indices=utils.Labels.automobile,
                                          seed_input=img,
                                          backprop_modifier='guided'))
        else:
            grads = visualize_saliency(model,
                                       layer_idx,
                                       filter_indices=utils.Labels.airplane,
                                       seed_input=img,
                                       backprop_modifier='relu')
        grads_sum = grads_sum + grads
    return grads_sum
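
The swap-and-rebuild preamble at the top of this example is the standard keras-vis pattern. A minimal sketch of just that step, assuming `ut` above is an alias for keras-vis's `vis.utils.utils`:

from keras import activations
from vis.utils import utils as vis_utils

def linearize_output(model, layer_idx):
    # Gradients through a saturated softmax are near zero, so keras-vis
    # recommends swapping it for a linear activation before backprop.
    model.layers[layer_idx].activation = activations.linear
    # apply_modifications rebuilds the graph so the swap takes effect.
    return vis_utils.apply_modifications(model)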
Example #2
def visualize_grad(model_weights, data):
    # model_weights: trained Keras model.
    # data: image already preprocessed for Keras.

    # Note: give the last layer of every model (the one with the softmax) the
    # name 'predictions'. That is the layer we visualize, which keeps the
    # architectures interchangeable.
    layer_idx = -1

    img = np.reshape(data, (1,) + data.shape)
    prediction = predictData(model_weights, img)
    pred_class = np.argmax(prediction)
    print('image shape: ', img.shape)
    print('prediction: ', prediction)
    print('class: ', pred_class)

    # Optionally swap softmax with linear first:
    # model_weights.layers[layer_idx].activation = activations.linear
    # model_weights = utils.apply_modifications(model_weights)

    heatmap_activation = visualize_activation(model_weights, layer_idx, filter_indices=pred_class, seed_input=img)
    heatmap_activation = np.squeeze(heatmap_activation, axis=2)
    plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'activation.png', heatmap_activation, cmap='jet')

    heatmap_saliency = visualize_saliency(model_weights, layer_idx, filter_indices=pred_class, seed_input=img)
    plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency.png', heatmap_saliency, cmap='jet')

    heatmap_saliency_g = visualize_saliency(model_weights, layer_idx, filter_indices=pred_class, seed_input=img, backprop_modifier='guided')
    plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency_guided.png', heatmap_saliency_g, cmap='jet')

    heatmap_saliency_r = visualize_saliency(model_weights, layer_idx, filter_indices=pred_class, seed_input=img, backprop_modifier='relu')
    plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency_relu.png', heatmap_saliency_r, cmap='jet')

    heatmap_cam = visualize_cam(model_weights, layer_idx, filter_indices=pred_class, seed_input=img)
    plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'cam.png', heatmap_cam, cmap='jet')
Example #3
def generate_saliency_map(show=True):
    """Generates a heatmap indicating the pixels that contributed the most towards
    maximizing the filter output. The class prediction is determined first, then a
    heatmap is generated to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    for path in ['../resources/ouzel.jpg', '../resources/ouzel_1.jpg']:
        seed_img = utils.load_img(path, target_size=(224, 224))
        pred_class = np.argmax(
            model.predict(np.array([img_to_array(seed_img)])))
        heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img)

        if show:
            cv2.imshow(
                'Saliency - {}'.format(utils.get_imagenet_label(pred_class)),
                heatmap)
            cv2.waitKey(0)
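
Showing the raw heatmap works, but blending it onto the input image is usually more readable. A hedged sketch using keras-vis's `overlay` helper (as later examples do), assuming `heatmap` here is a 2D saliency map in [0, 1]:

from matplotlib import cm
from vis.visualization import overlay

jet_heatmap = np.uint8(cm.jet(heatmap)[..., :3] * 255)            # colour-map to RGB
blended = np.uint8(overlay(jet_heatmap, img_to_array(seed_img)))  # alpha defaults to 0.5
cv2.imshow('Saliency overlay', blended)
cv2.waitKey(0)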
Example #4
    def activation_vis(layer_name, overlay_image):
        # The name of the layer we want to visualize
        # (see model definition in vggnet.py)
        layer_idx = [
            idx for idx, layer in enumerate(model.layers)
            if layer.name == layer_name
        ][0]

        # Images corresponding to tiger, penguin, dumbbell, speedboat, spider
        # image_paths = [
        #     "http://www.tigerfdn.com/wp-content/uploads/2016/05/How-Much-Does-A-Tiger-Weigh.jpg",
        #     "http://www.slate.com/content/dam/slate/articles/health_and_science/wild_things/2013/10/131025_WILD_AdeliePenguin.jpg.CROP.promo-mediumlarge.jpg",
        #     "https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg",
        #     "http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg",
        #     "http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg"
        # ]

        # Predict the corresponding class for use in `visualize_saliency`.
        seed_img = utils.load_img(overlay_image, target_size=target_size)
        pred_class = np.argmax(
            model.predict(np.array([img_to_array(seed_img)])))

        # Here we are asking it to show attention such that prob of `pred_class` is maximized.
        heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img)

        filename = general_dict["model_definition_id"] + "_" + general_dict["model_training_id"] + \
                   "_" + layer_name + "_" + overlay_image
        print "Saving activation map for layer %s overlaid onto image %s" % (
            layer_name, overlay_image)
        imsave(os.path.join(activation_maps_dir, filename), heatmap, 'png')
Example #5
    def plot(self, img, model, layer_name):
        """Plot the saliency map of `img`, smoothed with a Gaussian filter.

            Parameters:
                img: input image array
                model (keras model instance): ResNet50
                layer_name (str): name of the layer to visualize

            Return:
                self
        """
        layer = utils.find_layer(model, layer_name)
        # Swap softmax with linear before computing gradients.
        model.layers[layer].activation = activations.linear
        model_mod = utils.apply_modifications(model)

        self.salmap = visualize_saliency(model_mod, layer,
                                         filter_indices=None,
                                         seed_input=img,
                                         backprop_modifier=None,
                                         grad_modifier="absolute")
        plt.imshow(self.salmap)

        # Smooth one channel of the saliency map and overlay it on the image.
        self.salmap_gaussian_smoothed = ndimage.gaussian_filter(
            self.salmap[:, :, 2], sigma=5)
        plt.imshow(img)
        plt.imshow(self.salmap_gaussian_smoothed, alpha=.7)
        plt.axis('off')

        return self
Example #6
    def vis_saliency(self,
                     filter_index,
                     seed_input,
                     grad_modifier='absolute',
                     layer_index='default',
                     plot=False):
        """
        Args:
            filter_index: The label (1 or 0) which represents "S" or "R".
            seed_input: The image with shape of (27, 27 ,1).
            grad_modifier: Default value is "absolute", it can be changed by "relu" or "negate".
            layer_index: If "default", it will choose "preds" layer, or you can pass an integer less than 5.
            plot: If True, it will plot a figure of class saliency map.(Default is False)

        Returns:
            An numpy.array with the shape of 283*24*3 of a RGB image.
        """
        if layer_index == 'default':
            layer_index = utils.find_layer_idx(self.model, 'preds')
        img = visualize_saliency(self.model,
                                 layer_idx=layer_index,
                                 seed_input=seed_input,
                                 filter_indices=filter_index,
                                 grad_modifier=grad_modifier)
        if plot:
            plt.imshow(img.reshape([33, 40, -1]), cmap='jet')

        return img
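
A hypothetical call matching the docstring above; the wrapper class `SRClassifier` and the `sample` array are assumptions, not part of the original:

clf = SRClassifier()                      # assumed class owning vis_saliency
sal = clf.vis_saliency(filter_index=1,    # the "S" label
                       seed_input=sample, # array of shape (27, 27, 1)
                       grad_modifier='relu',
                       plot=True)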
Example #7
def get_gradients(conf, model, examples_batch, kerasvis=False):
    if kerasvis:
        from vis.visualization import visualize_saliency
    else:
        visualize_saliency = None  # Get rid of PyCharm warning
    subkey_gradients = []

    # Get number of outputs
    num_outputs = model.model.output.shape[1]

    # Get gradients for each subkey
    for neuron_index in range(0, num_outputs):
        if kerasvis is False:
            gradients = model.get_output_gradients(
                neuron_index,
                examples_batch,
                square_gradients=True,
                mean_of_gradients=conf.saliency_mean_gradient)
        else:
            gradients = np.zeros(examples_batch.shape)
            for i in range(0, examples_batch.shape[0]):
                gradients[i, :] = visualize_saliency(
                    model.model,
                    -1,
                    filter_indices=neuron_index,
                    seed_input=examples_batch[i, :])

        subkey_gradients.append(gradients)
    return subkey_gradients
Example #8
def visualize_single(model, conv_name, image, show_activations=True):
    from keras import models

    conv_layer, idx_layer = next((layer.output, idx) for idx, layer in enumerate(model.layers) if
                                 layer.output.name.startswith(conv_name))

    act_model = models.Model(inputs=model.input, outputs=[conv_layer])

    if show_activations:
        layer_activations = act_model.predict([[image]])

        print("Activations:", layer_activations.shape, "Index: ", idx_layer, len(act_model.layers))
        col_act = []

        for pred_idx, act in enumerate(layer_activations):
            row_act = []

            for act_idx in range(act.shape[2]):
                row_act.append(act[:, :, act_idx])

            col_act.append(cv2.hconcat(row_act))

        plt.matshow(cv2.vconcat(col_act), cmap='viridis')
        plt.waitforbuttonpress()
        plt.close()

    # Swap the activation on the layer object itself (not its output tensor),
    # then rebuild the model so the change takes effect.
    model.layers[idx_layer].activation = activations.linear
    sal_model = update_model(act_model)

    grads = visualize_saliency(sal_model, idx_layer, filter_indices=None, seed_input=image)
    plt.matshow(image)
    plt.imshow(grads, alpha=.6)
    plt.waitforbuttonpress()
    plt.close()
Example #9
    def predict(self, record):
        with self.graph.as_default():
            with self.session.as_default():
                # Select the first six channels, then restore the axis order.
                indices = np.r_[0:6]
                X = np.transpose(
                    np.transpose(self.test_dict[record]["x"])[indices])
                X = np.expand_dims(X, 0)
                X = np.expand_dims(X, 0)
                X = rescale_array(X)
                Y_pred = self.model.predict(X)

                all_rows = []
                for i in range(len(Y_pred[0][0])):
                    row = {}
                    row["sleepstage"] = mapping[i]
                    row["value"] = Y_pred[0][0][i]
                    all_rows.append(row)

                # Crude JSON: swap single quotes for double and None for null.
                with open('../frontend/data/SleepProb.json', 'w') as f:
                    f.write(
                        str(all_rows).replace('\'',
                                              '"').replace('None', 'null'))

                grads = visualization.visualize_saliency(self.model,
                                                         -2,
                                                         filter_indices=None,
                                                         seed_input=X)
                # print("Predicted values", Y_pred)
                Y_pred = Y_pred.argmax(axis=-1)
        return grads, Y_pred[0]
Example #10
def save_img(path, savepath, origimg, typeimg, layeridx):

    img = load_img(path, target_size=(224, 224))
    x = img_to_array(img)  # numpy array
    x = x.reshape((1,) + x.shape)  # add a batch dimension for Keras

    # Swap the layer's activation with linear; keras-vis only picks up the
    # change after the graph is rebuilt with apply_modifications.
    model.layers[layeridx].activation = activations.linear
    model_mod = utils.apply_modifications(model)

    if typeimg == 'activation':
        img = visualize_activation(model_mod, layeridx, 20, x)
    elif typeimg == 'saliency':
        img = visualize_saliency(model_mod, layeridx, 1, x)
    elif typeimg == 'cam':
        img = visualize_cam(model_mod, layeridx, 1, x)

    if not os.path.exists('layer-' + savepath):
        os.makedirs('layer-' + savepath)

    if not os.path.exists('image-' + savepath):
        os.makedirs('image-' + savepath)

    combined = str(savepath) + '/' + str(origimg)
    plt.imshow(img)
    plt.savefig('layer-' + combined, dpi=600)
Example #11
    def extract_sal_weights(self,
                            img,
                            layer_idx=-1,
                            filter_indices=4,
                            give_up_ratio=0.5):
        img = img.reshape(1, self.img_rows, self.img_cols, self.img_chns)

        grads = visualize_saliency(self.model,
                                   layer_idx=layer_idx,
                                   filter_indices=filter_indices,
                                   seed_input=img)

        # Sort the flattened gradients and take the value at the give-up
        # ratio as a threshold; everything below it is discarded.
        grads_flatten = grads.reshape(1, -1)
        grads_flatten_sort = np.argsort(grads_flatten)

        value_threshold_idx = grads_flatten_sort[
            0, int(give_up_ratio * grads_flatten_sort.shape[1])]

        value_threshold = grads_flatten[0, value_threshold_idx]

        # Zero out all gradients below the threshold.
        grads[grads < value_threshold] = 0

        return grads
Example #12
def saliency(img):
    # Rebuild the model on every call; clearing the session avoids stale
    # graph state between visualizations.
    K.clear_session()
    model = build_model(base_weights, top_weights)
    return visualize_saliency(model, -1, None, img,
                              backprop_modifier=backprop_mod,
                              grad_modifier=grad_mod,
                              custom_objects=custom_objects)
Example #13
    def visualize_filters(model,
                          layer_name,
                          input_data,
                          filter_indices=None,
                          mode="guided"):
        """
        Visualize what pattern activates a filter. Helps to discover what a
        filter might be computing.
        :returns: tuple(List, List) containing input images and heatmaps;
                  frames from each sample are stitched into a single image
        """
        from vis.visualization import get_num_filters, visualize_saliency
        from vis.utils import utils
        from vis.input_modifiers import Jitter

        inputs = []
        outputs = []
        # Number of filters in this layer (informational; see sketch below).
        num_filters = get_num_filters(model.get_layer(layer_name))
        layer_idx = utils.find_layer_idx(model, layer_name)
        for sample in input_data:
            heatmaps = visualize_saliency(
                model,
                layer_idx,
                filter_indices=filter_indices,
                seed_input=sample,
                backprop_modifier=mode,
            )
            inputs.append(utils.stitch_images(sample, margin=0))
            outputs.append(utils.stitch_images(heatmaps, margin=0))

        return np.array(inputs), np.array(outputs)
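
The `num_filters` value computed above could drive a per-filter sweep instead of a single `filter_indices`, similar to the filter loop in Example #24. A hedged sketch reusing the names from this function:

per_filter = []
for f_idx in range(num_filters):
    # One saliency map per filter in the chosen layer.
    per_filter.append(visualize_saliency(model,
                                         layer_idx,
                                         filter_indices=f_idx,
                                         seed_input=input_data[0],
                                         backprop_modifier=mode))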
Example #14
def visualize(img_dir):
    #TODO: test this method
    print('Loading Model...')
    model = keras.models.load_model(MODEL_PATH)
    # Replace the final softmax with a linear activation and rebuild the graph.
    model.layers.pop()
    model.add(Activation('linear'))
    model = utils.apply_modifications(model)
    for img_path in glob.glob(img_dir + '*.png'):
        print(f'Working on {img_path}')
        f, ax = plt.subplots(1, 4)
        img = load_img(img_path,
                       color_mode='grayscale',
                       target_size=(IMAGE_DIM, IMAGE_DIM),
                       interpolation='lanczos')
        background = img.convert('RGB')
        img = img_to_array(img)
        saliency_grads = visualize_saliency(model,
                                            -1,
                                            filter_indices=0,
                                            seed_input=img,
                                            backprop_modifier='guided')
        ax[0].imshow(background)
        ax[1].imshow(saliency_grads, cmap='jet')
        cam_grads = visualize_cam(model,
                                  -1,
                                  filter_indices=0,
                                  seed_input=img,
                                  backprop_modifier='guided')
        cam_heatmap = np.uint8(cm.jet(cam_grads)[..., :3] * 255)
        saliency_heatmap = np.uint8(cm.jet(saliency_grads)[..., :3] * 255)
        ax[2].imshow(overlay(saliency_heatmap, img_to_array(background)))
        ax[3].imshow(overlay(cam_heatmap, img_to_array(background)))
        plt.show()
Example #15
def main():

    if len(sys.argv) != 2:
        print('Give the model path.')
        return

    drive = DriveRun(sys.argv[1])
    config = Config()
    csv_fname = '/home/mir-alb/Ninad_Thesis/Test/Test.csv'
    csv_header = ['image_fname', 'steering_angle']
    df = pd.read_csv(csv_fname, names=csv_header, index_col=False)
    num_data = len(df)
    text = open('/home/mir-lab/Ninad_Thesis/Test/Salient/Salient.txt', 'w+')
    bar = ProgressBar()
    image_process = ImageProcess()

    for i in bar(range(num_data)):
        image_name = df.loc[i]['image_fname']
        steering = df.loc[i]['steering_angle']
        image_path = '/home/mir-lab/Ninad_Thesis/Test/' + image_name + '.jpg'
        image = utils.load_img(image_path, target_size=(config.image_size[1],
                                                        config.image_size[0]))
        image = image_process.process(image)
        prediction = drive.run(image)
        text.write(str(image_name) + '\t' + str(steering) + '\t'
                   + str(prediction) + '\n')

        modifiers = [None, 'negate', 'small_values']
        # Use a separate loop variable so the outer image index isn't shadowed.
        for j, modifier in enumerate(modifiers):
            heatmap = visualize_saliency(drive.net_model.model, layer_idx=-1,
                                         filter_indices=0, seed_input=image,
                                         grad_modifier=modifier, keepdims=True)
            final = overlay(image, heatmap, alpha=0.5)
            cv2.imwrite('/home/mir-lab/Ninad_Thesis/Test/Salient/' + image_name
                        + '_' + str(j) + '.jpg', final)
Example #16
def grad2(idxx):
    img = vid[idxx, :, :, :, :]
    # Note: the 'negate' saliency map is computed but immediately overwritten
    # by the unmodified-gradient map, which is what gets returned.
    grad_eval_by_hand = visualize_saliency(model,
                                           layer_idx=-1,
                                           filter_indices=0,
                                           seed_input=img,
                                           grad_modifier='negate',
                                           keepdims=True)
    grad_eval_by_hand = visualize_saliency(model,
                                           layer_idx=-1,
                                           filter_indices=0,
                                           seed_input=img,
                                           grad_modifier=None,
                                           keepdims=True)
    return img, grad_eval_by_hand, y_test
Example #17
def plotAttention(model, layer_idx, im, im_idx, label, fold,
                  output_folder_path):
    grads = visualize_saliency(
        model,
        layer_idx,
        filter_indices=None,
        seed_input=im,
        backprop_modifier="guided",
    )
    jet_heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)
    jet_heatmap = cv2.cvtColor(jet_heatmap, cv2.COLOR_BGR2RGB)
    cv2.imwrite(
        os.path.join(output_folder_path,
                     label + "-" + fold + "-sm-" + str(im_idx) + ".png"),
        jet_heatmap,
    )
    grads = visualize_cam(
        model,
        layer_idx,
        filter_indices=None,
        seed_input=im,
        backprop_modifier="guided",
        penultimate_layer_idx=utils.find_layer_idx(model, "block5_pool"),
    )
    jet_heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)
    jet_heatmap = cv2.cvtColor(jet_heatmap, cv2.COLOR_BGR2RGB)
    cv2.imwrite(
        os.path.join(output_folder_path,
                     label + "-" + fold + "-cam-" + str(im_idx) + ".png"),
        overlay(jet_heatmap, cv2.cvtColor(im, cv2.COLOR_GRAY2RGB), 0.2),
    )
Example #18
    def compute_visualisation_mask(self, img):
        grads = visualize_saliency(self.sal_model,
                                   self.layer_idx,
                                   filter_indices=None,
                                   seed_input=img,
                                   backprop_modifier='guided')
        return grads
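
A hedged sketch of the state this method assumes; the attribute names come from the method itself, while the layer name 'predictions' is a guess:

from keras import activations
from vis.utils import utils

def build_sal_model(self, model):
    # Locate the output layer, make it linear, and rebuild the graph;
    # the result is cached on the instance for compute_visualisation_mask.
    self.layer_idx = utils.find_layer_idx(model, 'predictions')
    model.layers[self.layer_idx].activation = activations.linear
    self.sal_model = utils.apply_modifications(model)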
Example #19
def show_saliency(model, layer_idx, images, outs):
    from vis.visualization import visualize_saliency

    #plt.figure()
    f, ax = plt.subplots(nb_classes, args.cpc, figsize=(15, 15))
    ax = ax.reshape((len(images)))
    plt.suptitle('Saliency for predicted classes')

    # New output containing the output result for the saliency visualization
    gradsSaliency = []
    certainties = []
    classKeys = []

    for i, img in enumerate(images):
        classKey = np.argmax(outs[i])
        classKeys.append(classKey)
        certainty = outs[i][classKey]
        certainties.append(certainty)

        #grads = visualize_saliency(model, layer_idx, filter_indices=classKeys[i], seed_input=img, backprop_modifier='guided')
        grads = visualize_saliency(model,
                                   layer_idx,
                                   filter_indices=None,
                                   seed_input=img,
                                   backprop_modifier='guided')
        gradsSaliency.append(grads)

        ax[i].imshow(grads, cmap='jet')
        ax[i].set_title('pred:' + str(classKeys[i]) + '(' +
                        str(round(certainties[i] * 100, 3)) + ' %)')
    plt.show()
    return gradsSaliency
Example #20
def plot_multiple_saliency(images,
                           model,
                           layer,
                           filter_idx=None,
                           backprop_modifier=None,
                           grad_modifier=None):
    fig, ax = plt.subplots(2, len(images), figsize=(4 * len(images), 4))
    ax = ax.flatten()
    for i, filename in enumerate(images):
        image = load_img(filename,
                         target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
        ax[i].imshow(image)
        ax[i].axis('off')
    for i, filename in enumerate(images):
        # Load the image first so the saliency is computed on the right input.
        image = load_img(filename,
                         target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
        grad = visualize_saliency(model,
                                  find_layer_idx(model, layer),
                                  filter_idx,
                                  normalize(image),
                                  backprop_modifier=backprop_modifier,
                                  grad_modifier=grad_modifier)
        ax[i + len(images)].imshow(overlay(grad, image))
        ax[i + len(images)].axis('off')
    return fig
Example #21
def saliency(model, input_images, input_labels):
    """Function that computes the attention map visualization.
    Args:
        model: A keras.Model
        input_images: Array of 3D images (height, width, 3) for which the attention is computed
        input_labels: The class label for each input image
    Returns:
        A list of attention maps
    """
    layer_idx = -1

    # Swap softmax with linear
    model.layers[-2].activation = activations.linear
    model = utils.apply_modifications(model)

    vis_images = []
    for l in range(len(input_images)):
        img = input_images[l]
        label = input_labels[l]
        # filter_indices=label selects the output node we want to maximize.
        grads = visualize_saliency(model,
                                   layer_idx,
                                   filter_indices=label,
                                   seed_input=img)
        vis_images.append(grads)
    return vis_images
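
A hypothetical call for the function above; the model and validation arrays are assumptions:

maps = saliency(trained_model, x_val[:4], y_val[:4])  # y_val assumed to hold integer class labels
for m in maps:
    plt.imshow(m, cmap='jet')
    plt.show()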
Example #22
def generate_saliency_map(show=True):
    """Generates a heatmap indicating the pixels that contributed the most towards
    maximizing the filter output. The class prediction is determined first, then a
    heatmap is generated to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    for path in ['../resources/ouzel.jpg', '../resources/ouzel_1.jpg']:
        seed_img = utils.load_img(path, target_size=(224, 224))

        # Swap the channel order, create an input with batch_size 1, and predict.
        bgr_img = utils.bgr2rgb(seed_img)
        img_input = np.expand_dims(img_to_array(bgr_img), axis=0)
        pred_class = np.argmax(model.predict(img_input))

        heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img)
        if show:
            plt.axis('off')
            plt.imshow(heatmap)
            plt.title('Saliency - {}'.format(
                utils.get_imagenet_label(pred_class)))
            plt.show()
Example #23
def plt_saliency(model, img, ax, idx):
    """
    Plot a saliency map: an image highlighting the pixels of the seed image
    that most affect the output activation;

    Reference: https://arxiv.org/pdf/1312.6034v2.pdf

    Args:
        model: Model to plot.
        img: Seed image.
        ax: Matplotlib axis.
        idx: Index of the plot to be shown on the axis.

    Returns: None

    """
    pred_layer_idx = vutils.find_layer_idx(model, "predictions")

    sal = vvis.visualize_saliency(
        model,
        pred_layer_idx,
        filter_indices=None,
        seed_input=img,
        # backprop_modifier='guided'
    )

    ax[idx].imshow(sal, cmap="jet")
    ax[idx].set_title("Saliency")
Example #24
def main():
  #  dataset_str = 'rgb'
  dataset_str = 'smooth'

  dataset = MIT67Dataset(dataset_str)

  model = load_model('models/vgg16_hybrid_1365_softmax_mit67_' + dataset_str + '.h5')

  # first dense layer
  layer_idx = 19

  # choose image (indexed by unshuffled test set)
  img_idx = 0
  X,Y = dataset.test_data((224,224), 'rgb', 1)

  img = X[img_idx]

  # directory to save images to
  dir_name = 'mit67_' + dataset_str + '_saliency/'
  # save unaltered image for comparison
  imsave(dir_name + 'smooth_' + str(img_idx) + '.png', img)

  # try several filters at this layer
  nb_filters = 64

  for i in range(nb_filters):
    print(i)
    saliency = visualize_saliency(model, layer_idx, i, img)

    # name format: img_index+layer_index+filter_index
    imsave((dir_name + 'sal_' + str(img_idx) + 
      '_' + str(layer_idx) + '_' + str(i) + '.png'),
      saliency
    )
Example #25
    def vis_saliency(self, x_test, scores, img_idx, fname, output_path):
        json_fname = os.path.join(self.output_path, 'model.json')
        layer_idx = utils.find_layer_idx(self.model, 'predictions')
        self.model.layers[layer_idx].activation = activations.linear
        self.model = utils.apply_modifications(self.model)
        classlabel = ['PA', 'Genuine']

        for class_idx in range(len(classlabel)):

            seed_input = x_test[img_idx]
            fname_original = fname.split('/')[:-6] + [
                '3',
                'origin_image',
            ] + fname.split('/')[-4:]
            fname_original = '/'.join(fname_original)
            fname_original = "{}.png".format(
                os.path.splitext(fname_original)[0])
            img = cv2.imread(fname_original, cv2.IMREAD_COLOR)[:, :, ::-1]
            output_fname = os.path.join(
                output_path,
                'vis_saliency',
                classlabel[class_idx],
                os.path.relpath(fname, output_path).replace('../', ''),
            )
            output_fname = "{}.png".format(os.path.splitext(output_fname)[0])
            safe_create_dir(os.path.dirname(output_fname))
            grad_top = visualize_saliency(self.model, layer_idx, class_idx,
                                          seed_input)
            print('-- saving visualizations in', output_fname)
            self.plot_map(grad_top, img, classlabel[class_idx],
                          scores[img_idx, class_idx], output_fname)
Example #26
def boxing(img, label):
    model_path = "./models/resnet50_model.h5"
    model = load_model(model_path)
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == "dense_2"
    ][0]

    heatmap = visualize_saliency(model, layer_idx, np.expand_dims(label,
                                                                  axis=0), img)
    # heatmap = visualize_activation(model, layer_idx, np.expand_dims(2, axis=0), img)
    # heatmap = visualize_cam(model, layer_idx, np.expand_dims(230, axis=0), img)

    # plt.imshow(heatmap, cmap=plt.cm.jet)
    # plt.colorbar()
    # plt.tight_layout()
    # fig = plt.gcf()
    # plt.show()
    # fig.savefig( os.path.join("saliency_map/", IMG_ID +".png"), dpi=100)

    # Smooth the heatmap with a 28x28 box filter.
    k_size = 28
    k = np.ones((k_size, k_size)) / k_size
    heatmap = signal.convolve2d(
        heatmap[:, :, 0], k, boundary='wrap', mode='same') / k.sum()

    # Threshold at 30% of the max to find the salient bounding box.
    threshold = heatmap.max() * 0.3

    maxTop = maxLeft = 999999999
    maxRight = maxBottom = -1
    for h in range(224):
        for w in range(224):
            # print(h,w)
            if heatmap[h][w] > threshold:
                if h < maxTop: maxTop = h
                if h > maxBottom: maxBottom = h
                if w < maxLeft: maxLeft = w
                if w > maxRight: maxRight = w

    maxTop = int(maxTop / 3)
    maxBottom = int(maxBottom / 3)
    maxLeft = int(maxLeft / 3)
    maxRight = int(maxRight / 3)

    for h in range(224):
        for w in range(224):
            if (int(h / 3) == maxTop and int(w / 3) in range(
                    maxLeft, maxRight)) or (int(h / 3) == maxBottom and int(
                        w / 3) in range(maxLeft, maxRight)) or (
                            int(w / 3) == maxRight
                            and int(h / 3) in range(maxTop, maxBottom)) or (
                                int(w / 3) == maxLeft
                                and int(h / 3) in range(maxTop, maxBottom)):
                img[h][w][0] = img[h][w][1] = 255
                img[h][w][2] = 0

    return img
Example #27
def main():
    parser = argparse.ArgumentParser(
        description='test AD recognition')
    parser.add_argument('--input', type=str, required=True, help="path to test data")
    parser.add_argument('--model', type=str, required=True, help="path to pre-trained model")
    parser.add_argument('--id', type=int, required=True, help="data id")
    args = parser.parse_args()

    model_dir = os.path.join(os.path.dirname(os.getcwd()), args.model)
    data = loaddata(args.input, 'testa.h5')
    print('data_shape:{}'.format(data.shape))
    print("[INFO] loading pre-trained network...")
    json_file = open(model_dir+'AD_3dcnnmodel.json', 'r')
    model_json = json_file.read()
    json_file.close()
    model = model_from_json(model_json)
    # load weights into new model
    model.load_weights(model_dir+"AD_3dcnnmodel.hd5")
    model.summary()

    layer_idx = utils.find_layer_idx(model, 'activation_10')
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    grads = visualize_saliency(model, layer_idx, filter_indices=0, seed_input=data[args.id], backprop_modifier='guided', keepdims=True)
    volume1 = np.squeeze(data[args.id], axis=0)
    volume1 = (volume1 - np.min(volume1)) / (np.max(volume1) - np.min(volume1))
    fig, axs = plt.subplots(1, 2, figsize=(16, 10), constrained_layout=True)
    axs[1].imshow(volume1[39, :, :], cmap='gray')
    volume2 = np.squeeze(grads, axis=0)
    vol = overlay(volume1, volume2)
    axial_planes = []
    for i in range(vol.shape[0]):
        axial_planes.append(vol[i, :, :])

    # Matplotlib animation over the axial planes
    Hz = np.zeros([vol.shape[1], vol.shape[2]])
    im = axs[0].imshow(Hz)

    def init():
        im.set_data(np.zeros(Hz.shape))
        return [im]

    def animate(i):
        im.set_data(axial_planes[i])
        im.autoscale()

        return [im]

    anim = animation.FuncAnimation(fig,
                                   animate,
                                   init_func=init,
                                   frames=len(axial_planes),
                                   interval=100,
                                   blit=True)
    plt.show()
Example #28
    def explain(self, image, target_class):
        # compute the saliency map for the target class
        res = visualize_saliency(model=self.model,
                                 layer_idx=self.layer,
                                 filter_indices=target_class,
                                 seed_input=image)

        return res, None
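
A hypothetical call for the explainer above; constructing the wrapper instance is an assumption:

sal_map, _ = explainer.explain(test_image, target_class=3)
plt.imshow(sal_map, cmap='jet')
plt.show()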
Example #29
def get_gradient(x, model, num_imgs):
    grads_out = np.empty((num_imgs,128, 128, 128))
    for i in range(num_imgs):
        print(i)
        # filter_indices=None maximizes across all output nodes.
        grads_out[i] = visualize_saliency(model, layer_idx=-1, filter_indices=None,
                                          seed_input=x[i], backprop_modifier=None)
    return grads_out
Example #30
def get_guided_saliency(model, img1, layer_idx=None, predict_class=None):

    # https://raghakot.github.io/keras-vis/vis.visualization/#visualize_saliency
    #  If you are visualizing final keras.layers.Dense layer, consider switching 'softmax' activation for 'linear'
    # Swap softmax with linear
    # model.layers[layer_idx].activation = activations.linear
    # model = LIBS.apply_modifications(model)  # saliency maps no much difference

    if predict_class is None:
        prob = model.predict(np.expand_dims(img1, axis=0))
        predict_class = np.argmax(prob)

    if layer_idx is None:
        layer_idx = len(model.layers) - 1

    # Guided backprop only propagates positive gradients for positive activations.
    modifier = 'guided'  # alternatives: 'guided', 'relu'
    # predict_class is always set by this point, so pass it directly.
    grads = visualize_saliency(model,
                               layer_idx,
                               filter_indices=[predict_class],
                               seed_input=img1,
                               backprop_modifier=modifier)

    str_uuid = str(uuid.uuid1())
    filename = '/tmp/' + str_uuid + '.png'

    cam = cv2.applyColorMap(np.uint8(255 * grads), cv2.COLORMAP_JET)
    cv2.imwrite(filename, cam)

    # plt.title('Saliency Maps')
    # plt.imshow(grads, cmap='jet')
    # fig = plt.gcf()
    # fig.set_size_inches(3, 3)
    #
    #
    # fig.savefig(filename, dpi=100)  # fig.savefig('/tmp/test.png', dpi=100)
    # plt.close()

    return filename
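
A hypothetical usage of get_guided_saliency; the model and image variables are assumptions:

png_path = get_guided_saliency(model, test_img)  # class inferred from the prediction
print('guided saliency written to', png_path)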