Example #1
def visualize(img_dir):
    #TODO: test this method
    print('Loading Model...')
    model = keras.models.load_model(MODEL_PATH)
    # Replace the final softmax with a linear activation so gradients are
    # not squashed by softmax saturation
    model.layers.pop()
    model.add(Activation('linear'))
    model = utils.apply_modifications(model)
    for img_path in glob.glob(img_dir + '*.png'):
        print(f'Working on {img_path}')
        f, ax = plt.subplots(1, 4)
        img = load_img(img_path,
                       color_mode='grayscale',
                       target_size=(IMAGE_DIM, IMAGE_DIM),
                       interpolation='lanczos')
        background = img.convert('RGB')
        img = img_to_array(img)
        saliency_grads = visualize_saliency(model,
                                            -1,
                                            filter_indices=0,
                                            seed_input=img,
                                            backprop_modifier='guided')
        ax[0].imshow(background)
        ax[1].imshow(saliency_grads, cmap='jet')
        cam_grads = visualize_cam(model,
                                  -1,
                                  filter_indices=0,
                                  seed_input=img,
                                  backprop_modifier='guided')
        cam_heatmap = np.uint8(cm.jet(cam_grads)[..., :3] * 255)
        saliency_heatmap = np.uint8(cm.jet(saliency_grads)[..., :3] * 255)
        ax[2].imshow(overlay(saliency_heatmap, img_to_array(background)))
        ax[3].imshow(overlay(cam_heatmap, img_to_array(background)))
        plt.show()
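The snippet above depends on several module-level names it never defines. A minimal sketch of the surrounding setup, assuming a model trained on 224x224 grayscale images (MODEL_PATH and IMAGE_DIM are placeholders, not part of the original example):

import glob
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import keras
from keras.layers import Activation
from keras.preprocessing.image import load_img, img_to_array
from vis.utils import utils
from vis.visualization import visualize_saliency, visualize_cam, overlay

MODEL_PATH = 'model.h5'   # placeholder path to a trained Keras model
IMAGE_DIM = 224           # placeholder input size

visualize('data/test_images/')   # note: the glob pattern expects a trailing slash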
Example #2
def visualize(model):
    from vis.visualization import visualize_activation
    from vis.utils import utils
    from keras import activations

    visualization_path = Path("visualization/auto-tagger/")

    if not visualization_path.exists():
        visualization_path.mkdir(parents=True)

    # visualize each class
    for t in range(len(tags)):
        save_path = visualization_path / "out_{}.png".format(tags[t])
        if save_path.exists():
            print("{} already visualized".format(tags[t]))
            continue

        layer_idx = utils.find_layer_idx(model, "dense_2")

        # Swap softmax with linear
        model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(model)

        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=t,
                                   verbose=True)
        array_to_img(img).save(save_path)
        print("saved to {}".format(str(save_path)))

    # visualize each layer
    for layer_name in [layer.name for layer in model.layers]:
        save_path = visualization_path / "{}.png".format(layer_name)
        if save_path.exists():
            print("{} already visualized".format(layer_name))
            continue
        if any([x in layer_name for x in ["batch_normalization", "input"]]):
            print("skipping visualization of {}".format(layer_name))
            continue

        # Utility to search for layer index by name.
        # Alternatively we can specify this as -1 since it corresponds to the last layer.
        layer_idx = utils.find_layer_idx(model, layer_name)

        # Swap softmax with linear
        model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(model)

        img = visualize_activation(model, layer_idx, verbose=True)
        array_to_img(img).save(save_path)
        print("saved to {}".format(str(save_path)))
Example #3
def cnnfeature_vis(model):
    from vis.visualization import visualize_activation
    from vis.utils import utils
    from keras import activations

    from matplotlib import pyplot as plt
    # %matplotlib inline
    plt.rcParams['figure.figsize'] = (18, 6)

    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    layer_idx = utils.find_layer_idx(model, 'preds')

    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    # This is the output node we want to maximize.
    filter_idx = 0
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
    plt.imshow(img[..., 0])

    for output_idx in np.arange(10):
        # Let's turn off verbose output this time to avoid clutter and just see the output.
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=output_idx,
                                   input_range=(0., 1.))
        plt.figure()
        plt.title("Network's perception of {}".format(output_idx))
        plt.imshow(img[..., 0])

    # For comparison: visualizations without the softmax-to-linear swap.
    # Swap linear back to softmax first.
    model.layers[layer_idx].activation = activations.softmax
    model = utils.apply_modifications(model)

    for output_idx in np.arange(10):
        # Let's turn off verbose output this time to avoid clutter and just see the output.
        # These visualizations are computed without swapping softmax.
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=output_idx,
                                   input_range=(0., 1.))
        plt.figure()
        plt.title("Network's perception of {}".format(output_idx))
        plt.imshow(img[..., 0])
        plt.show()
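Several of these examples repeat the same swap-softmax-for-linear preamble. A small helper capturing that recurring pattern might look like the sketch below (linearize_output is a hypothetical convenience wrapper, not part of keras-vis):

from keras import activations
from vis.utils import utils

def linearize_output(model, layer_name='predictions'):
    # Gradients through softmax saturate, which is why these examples swap
    # the final softmax for a linear activation before visualizing.
    layer_idx = utils.find_layer_idx(model, layer_name)
    model.layers[layer_idx].activation = activations.linear
    # apply_modifications rebuilds the graph; its return value must be used
    return utils.apply_modifications(model)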
Example #4
def ShowModelActivationMaximization(model, layer_name):
    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    layer_idx = utils.find_layer_idx(model, layer_name)

    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    # This is the output node we want to maximize.
    filter_idx = 0
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=filter_idx,
                               input_range=(0., 1.))
    plt.imshow(img[..., 0])
    file_name = "tmp/ActivationMax/" + layer_name + ".jpg"
    plt.imsave(file_name, img[..., 0])
    # plt.show()

    classes_num = model.output_shape[1]
    for output_idx in np.arange(classes_num):
        # Let's turn off verbose output this time to avoid clutter and just see the output.
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=output_idx,
                                   input_range=(0., 1.))
        img_name = "tmp/ActivationMax/%s_%s.jpg" % (
            layer_name, TrainingDefines.ACTION_NAME[output_idx])
        plt.imsave(img_name, img[..., 0])

    return
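Note that plt.imsave does not create missing directories, so a caller of ShowModelActivationMaximization would need to create the output folder first, e.g.:

import os
os.makedirs("tmp/ActivationMax", exist_ok=True)  # plt.imsave fails if the directory is missing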
Example #5
def get_saliency(model, negate=False):

    layer_idx = 17
    model.layers[layer_idx].activation = activations.linear
    model = ut.apply_modifications(model)
    num_samples = 80
    # note: despite the variable name, these samples come from the `ship` category
    car_images = utils.get_train_images_by_category(utils.Labels.ship,
                                                    num_samples)
    gradssave = np.zeros((32, 32))  # accumulator for the summed saliency maps
    for img in car_images:
        if negate:
            # vanilla saliency minus the guided-backprop saliency
            grads = (visualize_saliency(model,
                                        layer_idx,
                                        filter_indices=utils.Labels.automobile,
                                        seed_input=img)
                     - visualize_saliency(model,
                                          layer_idx,
                                          filter_indices=utils.Labels.automobile,
                                          seed_input=img,
                                          backprop_modifier='guided'))
        else:
            grads = visualize_saliency(model,
                                       layer_idx,
                                       filter_indices=utils.Labels.airplane,
                                       seed_input=img,
                                       backprop_modifier='relu')
        gradssave = gradssave + grads
    return gradssave
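A sketch of how the accumulated map could be inspected, assuming `model` is a trained CIFAR-10 classifier and matplotlib is imported:

grads_sum = get_saliency(model)
plt.imshow(grads_sum / 80.0, cmap='jet')  # average of the 80 per-image maps
plt.show()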
Example #6
    def visualize_dense_layer(self):
        self.logger.info('Visualizing dense layers')

        # create folder for saving visualization
        save_path = os.path.join(constants.MODEL_DIR, 'Visualization',
                                 self.model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # search the last dense layer with the name 'preds'
        layer_idx = utils.find_layer_idx(self.model, 'preds')

        # Swap softmax with linear
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)

        # output node we want to maximize
        for class_idx in np.arange(len(constants.CATEGORIES)):
            # Let's turn off verbose output this time to avoid clutter and just see the output.
            img = visualize_activation(model,
                                       layer_idx,
                                       filter_indices=class_idx,
                                       input_range=(0., 1.))
            plt.figure()
            plt.title("Network's perception of {}".format(class_idx))
            plt.imshow(img[..., 0])

            # save the plot
            plot_name = 'dense-layer-{}.png'.format(
                constants.CATEGORIES[class_idx])
            plt.savefig(os.path.join(save_path, plot_name))
            plt.show()
Example #7
    def plot(self, img, model, layer_name):
        """

            Parameters:
                img
                model (keras model instance) ResNet50
                layer_name (str)

            Return:
                self
        """
        # find_layer_idx returns the index of the named layer
        layer = utils.find_layer_idx(model, layer_name)
        model.layers[layer].activation = activations.linear
        model_mod = utils.apply_modifications(model)

        self.salmap = visualize_saliency(model_mod, layer,
                                         filter_indices=None,
                                         seed_input=img,
                                         backprop_modifier=None,
                                         grad_modifier="absolute")
        plt.imshow(self.salmap)
        #        plt.savefig('SalMapRaw.png', dpi=300)

        self.salmap_gaussian_smoothed = ndimage.gaussian_filter(
            self.salmap[:, :, 2], sigma=5)
        plt.imshow(img)
        plt.imshow(self.salmap_gaussian_smoothed, alpha=.7)
        plt.axis('off')

        #        plt.savefig('SalMapScale.png', dpi=300)
        return self
Example #8
def showSaliencyMap(showModel, layerName, showImage, imageSize):
    layer_idx = utils.find_layer_idx(showModel, layerName)
    showModel.layers[layer_idx].activation = activations.linear
    showModel = utils.apply_modifications(showModel)
    plt.rcParams['figure.figsize'] = (18, 6)
    for modifier in [None, 'guided', 'relu']:
        showImage = np.array(showImage)
        f, ax = plt.subplots(1, len(showImage))
        plt.suptitle("vanilla" if modifier is None else modifier)
        for i, img in enumerate(showImage):
            img = np.array(img)
            grads = visualize_cam(showModel,
                                  layer_idx,
                                  filter_indices=3,
                                  seed_input=img,
                                  backprop_modifier=modifier)
            # overlay the heatmap onto original image.
            jet_heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)
            jet_heatmap = np.array(np.reshape(jet_heatmap,
                                              (imageSize, imageSize, 3)),
                                   dtype=np.uint8)
            if len(showImage) == 1:
                # for single image
                ax.imshow(overlay(jet_heatmap, img))
            else:
                # for multi images
                ax[i].imshow(overlay(jet_heatmap, img))
        plt.show()
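A hypothetical call: filter_indices=3 is hard-coded inside showSaliencyMap, so the heatmaps always explain class 3 ('predictions', img_a and img_b are assumptions):

test_images = [img_a, img_b]   # assumed preprocessed arrays of shape (224, 224, 3)
showSaliencyMap(model, 'predictions', test_images, 224)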
Example #9
def test():
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)

    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    layer_idx = utils.find_layer_idx(model, 'predictions')

    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    plt.rcParams['figure.figsize'] = (18, 6)

    img1 = utils.load_img('images/ouzel1.jpg', target_size=(224, 224))
    img2 = utils.load_img('images/ouzel2.jpg', target_size=(224, 224))

    # f, ax = plt.subplots(1, 2)
    # ax[0].imshow(img1)
    # ax[1].imshow(img2)

    f, ax = plt.subplots(1, 2)

    for i, img in enumerate([img1, img2]):
        # 20 is the imagenet index corresponding to `ouzel`
        # heatmap = saliency.visualize_cam(model, layer_idx, filter_indices=20,
        #                                  seed_input=img, backprop_modifier='guided')
        heatmap = saliency.visualize_saliency(model, layer_idx, filter_indices=20,
                                              seed_input=img, backprop_modifier=None)
        print(np.shape(heatmap))
        # Let's overlay the heatmap onto the original image.
        ax[i].imshow(overlay(heatmap, img))

    plt.show()
Example #10
    def init_salient(self, model):
        # Utility to search for layer index by name. 
        # Alternatively we can specify this as -1 since it corresponds to the last layer.
        first_output_name = None
        for i, layer in enumerate(model.layers):
            if first_output_name is None and "dropout" not in layer.name.lower() and "out" in layer.name.lower():
                first_output_name = layer.name
                layer_idx = i

        if first_output_name is None:
            print("Failed to find the model layer named with 'out'. Skipping salient.")
            return False

        print("####################")
        print("Visualizing activations on layer:", first_output_name)
        print("####################")
        
        # ensure we have linear activation
        model.layers[layer_idx].activation = activations.linear
        # build salient model and optimizer
        # (get, ActivationMaximization and Optimizer come from
        # vis.backprop_modifiers, vis.losses and vis.optimizer respectively)
        sal_model = utils.apply_modifications(model)
        modifier_fn = get('guided')
        sal_model_mod = modifier_fn(sal_model)
        losses = [
            (ActivationMaximization(sal_model_mod.layers[layer_idx], None), -1)
        ]
        self.opt = Optimizer(sal_model_mod.input, losses, norm_grads=False)
        return True
Example #11
def get_sal_img():
    # note: x, layer_idx, filter_indices and num_val are assumed to be
    # defined before these extraction calls run
    sal_ori = model.extract_sal_img(x, layer_idx, filter_indices)
    save_fig(sal_ori, 'vis-imgs/watermark' + str(num_val))

    sal_pef = perfect_model.extract_sal_img(x, layer_idx, filter_indices)
    save_fig(sal_pef, 'vis-imgs/watermark' + str(num_val))

    index = None
    for idx, layer in enumerate(model.model.layers):
        print(idx, layer)

    #layer_idx = utils.find_layer_idx(model.model, 'dense_2')
    layer_idx = -1
    filter_indices = 2

    # Swap softmax with linear
    model.model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    img1 = x_test[0]
    img2 = x_test[1]
    #save_saliency_img(model, img1, img2, layer_idx, filter_indices)

    saliency = perfect_model.extract_saliency(img1, layer_idx, filter_indices)

    save_fig(
        saliency, 'vis-imgs/cifar10/extracted_saliency_give_up_ratio_' +
        str(args.give_up_ratio) + '_' + str(num_pass) + '.png')
    save_fig(img1.reshape(img_rows, img_cols, img_chns),
             'vis-imgs/cifar10/real_img_' + str(num_pass) + '.png')

    return saliency
Example #12
def saliency(model, input_images, input_labels):
    """Function that computes the attention map visualization.
    Args:
        model: A keras.model
        input_images: Array of 3D images (height, width, 3) of which the attention is computed
        input_labels: The class label for each input image
    Returns:
        A list of attention maps
    """
    layer_idx = -1

    # Swap softmax with linear (the swap here is applied to the
    # second-to-last layer; saliency is then taken at layer_idx = -1)
    model.layers[-2].activation = activations.linear
    model = utils.apply_modifications(model)

    # Compute one attention map per (image, label) pair
    vis_images = []
    for img, label in zip(input_images, input_labels):
        grads = visualize_saliency(model,
                                   layer_idx,
                                   filter_indices=label,
                                   seed_input=img)
        vis_images.append(grads)
    return vis_images
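A sketch of a call, assuming x_test and y_test hold preprocessed images and integer class labels:

attention_maps = saliency(model, x_test[:5], y_test[:5])
for m in attention_maps:
    plt.imshow(m, cmap='jet')
    plt.show()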
Example #13
    def vis_saliency(self, x_test, scores, img_idx, fname, output_path):
        json_fname = os.path.join(self.output_path, 'model.json')
        layer_idx = utils.find_layer_idx(self.model, 'predictions')
        self.model.layers[layer_idx].activation = activations.linear
        self.model = utils.apply_modifications(self.model)
        classlabel = ['PA', 'Genuine']

        for class_idx in range(len(classlabel)):

            seed_input = x_test[img_idx]
            fname_original = fname.split('/')[:-6] + [
                '3',
                'origin_image',
            ] + fname.split('/')[-4:]
            fname_original = '/'.join(fname_original)
            fname_original = "{}.png".format(
                os.path.splitext(fname_original)[0])
            img = cv2.imread(fname_original, cv2.IMREAD_COLOR)[:, :, ::-1]
            output_fname = os.path.join(
                output_path,
                'vis_saliency',
                classlabel[class_idx],
                os.path.relpath(fname, output_path).replace('../', ''),
            )
            output_fname = "{}.png".format(os.path.splitext(output_fname)[0])
            safe_create_dir(os.path.dirname(output_fname))
            grad_top = visualize_saliency(self.model, layer_idx, class_idx,
                                          seed_input)
            print('-- saving visualizations in', output_fname)
            self.plot_map(grad_top, img, classlabel[class_idx],
                          scores[img_idx, class_idx], output_fname)
Example #14
    def plot_convs_heatmap(self):
        layer_idx = -1

        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)
        names = []
        for layer in self.model.layers:
            if isinstance(layer, _Conv):
                names.append(layer.name)

        pred_class = np.argmax(self.model.predict(self.x))
        fig = plt.figure()

        for i in range(len(names)):
            name = names[i]

            print('Calculating heatmap for ', name)
            penult_layer_idx = utils.find_layer_idx(model, name)
            heatmap = visualize_cam(model, layer_idx,
                                    filter_indices=[pred_class],
                                    seed_input=self.seed_img,
                                    penultimate_layer_idx=penult_layer_idx,
                                    backprop_modifier=None)
            sp = fig.add_subplot(6, 9, i + 1)
            sp.set_title(name, fontsize=7)
            sp.imshow(overlay(self.seed_img, heatmap))
            sp.get_xaxis().set_visible(False)
            sp.get_yaxis().set_visible(False)

        plt.show()
Example #15
def main():
    model = load_model('model2.h5py')
    model.summary()
    # swap softmax activation function to linear
    layer_idx = -1
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)


    layer_names = ['leaky_re_lu_1', 'leaky_re_lu_2', 'leaky_re_lu_3', 'leaky_re_lu_4']
    for lid in range(len(layer_names)):
        layer_name = layer_names[lid]
        layer_idx = utils.find_layer_idx(model, layer_name)
        filters = np.arange(get_num_filters(model.layers[layer_idx]))
        filters = _shuffle(filters)
        vis_images = []
        for idx in range(16):
            indices = filters[idx]
            img = visualize_activation(model, layer_idx, filter_indices=indices,
                                       tv_weight=0., input_modifiers=[Jitter(0.5)])
            vis_images.append(img)
        #img = img.reshape((48, 48))
        #plt.imshow(img, cmap="Blues")
        #plt.show()

        stitched = utils.stitch_images(vis_images, cols=8)
        plt.figure()
        plt.axis('off')
        shape = stitched.shape
        stitched = stitched.reshape((shape[0], shape[1]))
        plt.imshow(stitched)
        plt.title(layer_name)
        plt.tight_layout()
        plt.savefig('Filter_{}.png'.format(lid))
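The imports main() relies on are sketched below as an assumption about what the original module declares (the sklearn alias for _shuffle is a guess; the rest are keras-vis 0.4.x names):

import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from keras import activations
from sklearn.utils import shuffle as _shuffle   # assumed origin of _shuffle
from vis.utils import utils
from vis.visualization import visualize_activation, get_num_filters
from vis.input_modifiers import Jitter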
Example #16
    def apply(self, model, image, layer, filter):
        # Swap the softmax activation for a linear one
        model.layers[layer].activation = activations.linear
        model = utils.apply_modifications(model)
        # Pick the closest preceding conv/pooling layer as the penultimate layer
        penultimate_layer_idx = None
        if layer == 2 and isinstance(
                model.layers[1], (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):
            penultimate_layer_idx = 1
        elif layer <= 2 and isinstance(
                model.layers[0], (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):
            penultimate_layer_idx = 0
        grads = visualize_cam(model,
                              layer,
                              filter_indices=filter,
                              backprop_modifier="guided",
                              grad_modifier=None,
                              seed_input=image,
                              penultimate_layer_idx=penultimate_layer_idx)
        # Grayscale inputs are expanded to 3 channels before overlaying
        shape = model.layers[0].input_shape
        if shape[-1] == 1:
            if K.image_data_format() == 'channels_first':
                img = np.zeros(shape=(int(model.input.shape[2]),
                                      int(model.input.shape[3]), 3))
            else:
                img = np.zeros(shape=(int(model.input.shape[1]),
                                      int(model.input.shape[2]), 3))
            for i in range(len(image)):
                for j in range(len(image[i])):
                    img[i][j][0] = image[i][j][0]
                    img[i][j][1] = image[i][j][0]
                    img[i][j][2] = image[i][j][0]
            image = img
        grads = overlay(grads, image)
        return grads
Example #17
    def show_attention_evolution(self, num_epoch, class_name, image_path):
        if class_name not in self.labels:
            print("The label is wrong: " + str(class_name))
            return

        class_index = self.labels.index(class_name)

        two_ep = int(num_epoch / 4)
        thr_ep = int(num_epoch / 2)
        four_ep = int(num_epoch * 3 / 4)
        model_epochs = [1, two_ep, thr_ep, four_ep, num_epoch]
        for idx, epoch in enumerate(model_epochs):

            print("epoch" + str(epoch))
            self.load_model(epoch, silent=True)
            layer_idx = vis_utils.find_layer_idx(self.model, 'predictions')

            # Swap softmax with linear
            self.model.layers[layer_idx].activation = activations.linear
            vis_model = vis_utils.apply_modifications(self.model)

            x = vis_utils.load_img(image_path,
                                   target_size=(self.img_width,
                                                self.img_height))
            grads = visualize_cam(vis_model,
                                  layer_idx,
                                  filter_indices=class_index,
                                  seed_input=x,
                                  backprop_modifier="guided")
            show_img_array(grads)
            show_img_array(np.squeeze(x))
Example #18
def main():
    parser = argparse.ArgumentParser(
        description='test AD recognition')
    parser.add_argument('--input', type=str, required=True, help="path to test data")
    parser.add_argument('--model', type=str, required=True, help="path to pre-trained model")
    parser.add_argument('--id', type=int, required=True, help="data id")
    args = parser.parse_args()

    model_dir = os.path.join(os.path.dirname(os.getcwd()), args.model)
    data = loaddata(args.input, 'testa.h5')
    print('data_shape:{}'.format(data.shape))
    print("[INFO] loading pre-trained network...")
    json_file = open(os.path.join(model_dir, 'AD_3dcnnmodel.json'), 'r')
    model_json = json_file.read()
    json_file.close()
    model = model_from_json(model_json)
    # load weights into the new model
    model.load_weights(os.path.join(model_dir, "AD_3dcnnmodel.hd5"))
    model.summary()

    layer_idx = utils.find_layer_idx(model, 'activation_10')
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    grads = visualize_saliency(model, layer_idx, filter_indices=0, seed_input=data[args.id], backprop_modifier='guided', keepdims=True)
    volume1 = np.squeeze(data[args.id], axis=0)
    volume1 = (volume1 - np.min(volume1)) / (np.max(volume1) - np.min(volume1))
    fig, axs = plt.subplots(1, 2, figsize=(16, 10), constrained_layout=True)
    axs[1].imshow(volume1[39, :, :], cmap='gray')
    volume2 = np.squeeze(grads, axis=0)
    vol = overlay(volume1, volume2)
    axial_planes = []
    for i in range(vol.shape[0]):
        axial_planes.append(vol[i,:,:])

    # Matplotlib animation over the axial slices
    Hz = np.zeros([vol.shape[1], vol.shape[2]])
    im = axs[0].imshow(Hz)

    def init():
        im.set_data(np.zeros(Hz.shape))
        return [im]

    def animate(i):
        im.set_data(axial_planes[i])
        im.autoscale()

        return [im]

    anim = animation.FuncAnimation(fig,
                                   animate,
                                   init_func=init,
                                   frames=len(axial_planes),
                                   interval=100,
                                   blit=True)
    plt.show()
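The animation above is only displayed; to persist it, one could save it as a GIF, assuming Pillow is available as a matplotlib writer:

anim.save('saliency_slices.gif', writer='pillow', fps=10)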
Example #19
def predict_and_visualize(model, indices, results_dir):
    f = h5py.File(workdir + 'ibis.hdf5', 'r')
    images = f['ibis_t1']
    labels = f['qc_label']
    filenames = f['filename']

    predictions = []

    with open(results_dir + 'test_images.csv', 'w') as output_file:
        output_writer = csv.writer(output_file)
        output_writer.writerow(['Filename', 'Probability', 'Label'])

        for index in indices:
            img = images[index, target_size[0]//2, ...][np.newaxis, ..., np.newaxis]
            label = labels[index, ...]

            prediction = model.predict(img, batch_size=1)
            print('probs:', prediction[0])

            output_writer.writerow([filenames[index, ...], prediction[0][0], np.argmax(label)])

            predictions.append(np.argmax(prediction[0]))


    for i, (index, prediction) in enumerate(zip(indices, predictions)):
        # re-slice the image for this index; otherwise the last image from
        # the prediction loop above would be visualized every time
        img = images[index, target_size[0]//2, ...][np.newaxis, ..., np.newaxis]

        layer_idx = utils.find_layer_idx(model, 'predictions')
        model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(model)

        grads = visualize_cam(model, layer_idx, filter_indices=prediction,
                              seed_input=img[0, ...], backprop_modifier='guided')

        heatmap = np.uint8(cm.jet(grads)[:, :, 0, :3] * 255)
        gray = np.uint8(img[0, :, :, :]*255)
        gray3 = np.dstack((gray,)*3)

        print('image shape, heatmap shape', gray3.shape, heatmap.shape)

        plt.imshow(overlay(heatmap, gray3, alpha=0.25))

        actual = np.argmax(labels[index, ...])
        if prediction == actual:
            decision = '_right_'
        else:
            decision = '_wrong_'

        if actual == 1:
            qc_status = 'PASS'
        else:
            qc_status = 'FAIL'

        # filename = qc_status + decision + filenames[index, ...][:-4] + '.png'
        filename = str(i) + decision + qc_status + '.png'

        plt.axis('off')
        plt.savefig(results_dir + filename, bbox_inches='tight')
        plt.clf()

    f.close()
Example #20
    def plot_activation(self):
        layer_idx = -1
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)
        # only class 11 is visualized here
        for pred_class in range(25)[11:12]:
            print(self.inv_dico.get(pred_class))
            actmap = visualize_activation(model, layer_idx,
                                          filter_indices=pred_class)
            plt.imsave('img.jpg', actmap)
Example #21
def fullvideo_vis():
    T = 48
    img_col = 100
    img_row = 100
    img_chan = 3
    nb_class = 200
    model = Sequential()
    # 1st layer group
    model.add(Conv3D(64, (3, 3, 3), padding='same', name='conv1',
                     strides=(1, 1, 1),
                     input_shape=(T, img_row, img_col, img_chan)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv3D(64, (3, 3, 3), padding='same', name='conv1_1',
                     strides=(1, 2, 2), activation='relu'))
    # model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='same', name='pool1'))
    # 2nd layer group
    model.add(Conv3D(128, (3, 3, 3), padding='same', name='conv2',
                     strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv3D(128, (3, 3, 3), padding='same', name='conv2_1',
                     strides=(2, 2, 2), activation='relu'))
    # model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='same', name='pool2'))
    # 3rd layer group
    model.add(Conv3D(256, (3, 3, 3), padding='same', name='conv3b',
                     strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv3D(256, (3, 3, 3), padding='same', name='conv3_1',
                     strides=(2, 2, 2), activation='relu'))
    # model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='same', name='pool3'))
    # 4th layer group
    model.add(Conv3D(256, (3, 3, 3), padding='same', name='conv4b',
                     strides=(2, 2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv3D(256, (3, 3, 3), padding='same', name='conv4_1',
                     strides=(2, 2, 2), activation='relu'))
    # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same', name='pool4'))
    # 5th layer group
    # model.add(Conv3D(512, 2, 2, 2, padding='same', name='conv5b'))
    # model.add(BatchNormalization())
    # model.add(Activation('relu'))
    model.add(Flatten())
    # FC layers group
    model.add(Dense(1024, name='fc6'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(.5))

    model.add(Dense(1024, name='fc7'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(Dense(nb_class, activation='softmax', name='fc8'))
    model.load_weights('./weights/fullvideo_14.139-1.0000.h5')

    # Swap softmax with linear; only needed when visualizing the softmax layer
    layer_idx = utils.find_layer_idx(model, 'fc8')
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)
    for filt_index in range(0, 30):
        visualize_saliency_3Dcnn(
            model, layer_idx, filter_indices=filt_index,
            seed_input=get_imgsequence(
                pre_process=True,
                path='/home/ljg/Desktop/lipnet/data/5sec10/{}/1/'.format(filt_index + 1),
                sampling=True, start_T=1, time_window=48),
            original_img=get_imgsequence(
                pre_process=False,
                path='/home/ljg/Desktop/lipnet/data/5sec10/{}/1/'.format(filt_index + 1),
                sampling=True, start_T=1, time_window=48),
            backprop_modifier=None,
            save_pathname='fullvideo_vis')
Example #22
    def detect(self,
               filename,
               answer_text,
               verbose=False,
               layer_index=-1,
               mode="local"):
        if mode == "local":
            input_tensor = image_to_tensor(filename, self.img_height,
                                           self.img_width)
        else:
            try:
                input_tensor = image_to_tensor(filename, self.img_height,
                                               self.img_width)
            except Exception:
                raise ValueError("This URL is not supported!! Use other one.")
        detection = self.model.predict(input_tensor)[0]
        a = np.array(detection)
        detect_label = self.labels[a.argmax(0)]
        if verbose is True:
            print("Result .... " + str(answer_text[detect_label]))
            img1 = vis_utils.load_img(filename,
                                      target_size=(self.img_height,
                                                   self.img_width))
            # Swap softmax with linear
            layer_idx = vis_utils.find_layer_idx(self.model, 'predictions')
            self.model.layers[layer_idx].activation = activations.linear
            vis_model = vis_utils.apply_modifications(self.model)
            filter_index = a.argmax(0)
            grads = visualize_cam(
                vis_model,
                layer_idx,
                filter_index,  # class index
                img1[:, :, :],
                backprop_modifier='guided')
            fig = plt.figure(figsize=(10, 5))
            ax1 = fig.add_subplot(1, 2, 1)
            ax1.tick_params(labelbottom=False, bottom=False)
            ax1.grid(False)
            ax1.tick_params(labelleft=False, left=False)
            plt.yticks(color="None")
            ax1.set_xticklabels([])
            ax1.imshow(img1)
            ax1.imshow(grads, cmap='jet', alpha=0.6)
            ax1.set_title("Heat Map")
            sns.set(style="white", context="talk")
            f, ax1 = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
            sns.barplot(self.labels, detection, palette="PiYG", ax=ax1)
            ax1.set_ylabel("Value")
            plt.tick_params(length=0)
            plt.grid(False)
            plt.show()
        else:
            print(detect_label)
            print(detection)
        print("The detect method has finished.")
Example #23
def grad_viewer(model, img, prediction):
    flat_sorted_class = np.argsort(prediction.flatten())[::-1]
    flat_sorted_layer = utils.find_layer_idx(model, 'dense_2')
    model.layers[flat_sorted_layer].activation = keras.activations.linear
    model = utils.apply_modifications(model)
    generate_grad(model, img, flat_sorted_class, flat_sorted_layer)
Example #24
    def show_layer_activation(self, layer_name, filter_indices):
        layer_idx = vis_utils.find_layer_idx(self.model, layer_name)

        self.model.layers[layer_idx].activation = activations.linear
        vis_model = vis_utils.apply_modifications(self.model)

        img = visualize_activation(vis_model,
                                   layer_idx,
                                   filter_indices=filter_indices)
        show_img_array(img)
Example #25
def saliency2(image_path, model):
    #img = utils.load_img(image_path, target_size=(256, 256))
    img = png2numpy(image_path)
    #layer_idx = utils.find_layer_idx(model, 'predictions')
    layer_idx = -1
    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    modifier = 'guided'
    out_img = (os.path.basename(image_path).replace('.png', '')
               + '_' + str(modifier) + '_saliency.png')
    f, ax = plt.subplots(1, 1)

    # reshape the grayscale image to (1, 256, 256, 1) for the model
    img = np.reshape(img, (256, 256, 1))
    cv2.imwrite('test.png', img)
    img = np.expand_dims(img, axis=0)
    print('img', img.shape)

    model.summary()
    grads = visualize_saliency(model, layer_idx, filter_indices=0,
                               seed_input=img, backprop_modifier=modifier)
    print('grads', grads.shape)

    # Plot the saliency map with the 'jet' colormap to visualize it as a heatmap
    ax.imshow(grads, cmap='jet')
    f.savefig(out_img, format='PNG')
Example #26
def getModelSaliency(model, input, classIdx):
    # Swap softmax with linear
    layer_idx = -1
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)
    grads = visualize_cam(
        model,
        layer_idx,
        filter_indices=classIdx,
        seed_input=input,
        grad_modifier=None)  # alternatives: backprop_modifier='guided' or 'relu'
    return grads
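Despite its name, getModelSaliency returns a Grad-CAM heatmap (it calls visualize_cam). A hypothetical usage, assuming x_test holds preprocessed images and matplotlib is imported:

heatmap = getModelSaliency(model, x_test[0], 3)
plt.imshow(heatmap, cmap='jet')
plt.show()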
Example #27
def calc_deep_taylor_values(model):
    """
    Calculates deep taylor decomposition values for the given training set and
    Multilayer Perceptron (MLP) model.

    :param dataset: Training or test data.
    :param model: Trained MLP model.
    :return: Deep taylor values
    """
    # Predict training and test probabilities
    test_probs = predict_probability(model.X_te, model.best_model, "MLP")
    train_probs = predict_probability(model.X_tr, model.best_model, "MLP")

    # Set last layer activation to linear. If this swapping is not done, the
    # results might be suboptimal
    model.best_model.layers[-1].activation = activations.linear
    stripped_model = utils.apply_modifications(model.best_model)

    # Calculate class weights
    train_input_weights = train_probs
    train_input_weights[np.where(
        model.y_tr == 0)] = (1 -
                             train_input_weights[np.where(model.y_tr == 0)])

    # Get last layer index
    class_idx = 0  # if the activation of last layer was sigmoid
    last_layer_idx = utils.find_layer_idx(model.best_model, "dense_2")

    # Get the input the model was trained on
    seed_input = model.X_tr.values
    # The deep taylor is bounded to a range which should be defined based on
    # the input range:
    input_range = [min(seed_input.flatten()), max(seed_input.flatten())]

    # Calculate global gradients of all patients (deep taylor)
    gradient_analyzer = innvestigate.create_analyzer(
        "deep_taylor.bounded",  # analysis method identifier
        stripped_model,  # model without softmax output
        low=input_range[0],
        high=input_range[1],
    )

    analysis = gradient_analyzer.analyze(seed_input)

    # Calculate score based average
    t_analysis = np.transpose(analysis, (1, 0))
    train_input_weights_s = np.squeeze(train_input_weights)
    score_avg_analysis = np.expand_dims(np.dot(t_analysis,
                                               train_input_weights_s),
                                        axis=0)

    return score_avg_analysis
Example #28
def Get_Features(model, X, y):
    # Swap softmax with linear
    layer_idx = 2
    model.layers[layer_idx].activation = keras.activations.linear
    model = utils.apply_modifications(model)
    #grads = visualize_cam(model, X[0], filter_indices=None)
    filter_idx = 0
    grads = visualize_saliency(model, layer_idx,
                               filter_indices=filter_idx, seed_input=X[1])
    # Plot with 'jet' colormap to visualize as a heatmap.
    plt.imshow(grads, cmap='jet')
    input()
Example #29
def vis_max(model, save_path):
    '''
        Generate the input image that maximizes the response of each class in the final FC layer
        Please also see keras-vis for details.
    '''
    layer_idx = vutils.find_layer_idx(model, 'predictions')
    model.layers[layer_idx].activation = activations.linear
    model = vutils.apply_modifications(model)
    for ind in range(len(config.classes)):
        print('Generating for class {}'.format(ind))
        plt.rcParams['figure.figsize'] = (18, 6)
        img = visualize_activation(model, layer_idx, filter_indices=ind)
        mpimg.imsave(save_path + str(ind) + ".png", img)
    print('Done')
Example #30
def iden_visualization():

    model = get_model()
    model.load_weights('./weights/newnetwork_14.78-0.9933.h5')

    # Swap softmax with linear; only needed when visualizing the softmax layer
    layer_idx = utils.find_layer_idx(model, 'fc8')
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)
    for filt_index in range(0, 200):
        visualize_saliency_3Dcnn(
            model, layer_idx, filter_indices=filt_index,
            seed_input=get_imgsequence(
                pre_process=True,
                path='/home/ljg/Desktop/lipnet/data/5sec10/{}/1/'.format(filt_index + 1)),
            original_img=get_imgsequence(
                pre_process=False,
                path='/home/ljg/Desktop/lipnet/data/5sec10/{}/1/'.format(filt_index + 1)),
            backprop_modifier=None)