Example #1
    def gen_heatmap(self,
                    img_input,
                    pred_class,
                    gen_gif=True,
                    gif_fps=1,
                    blend_original_image=True,
                    norm_reverse=True,
                    base_dir_save='/tmp'):

        gradcam = self.__grad_cam(self.model, img_input, pred_class,
                                  self.layer_name)
        jetcam = cv2.applyColorMap(np.uint8(255 * gradcam), cv2.COLORMAP_JET)

        str_uuid = str(uuid.uuid1())
        os.makedirs(os.path.join(base_dir_save, str_uuid), exist_ok=True)

        if gen_gif:
            image_original = img_input[0, :]
            from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
            if norm_reverse:
                image_original = input_norm_reverse(image_original)
            image_original = image_original.astype(np.uint8)

            filename_original = os.path.join(base_dir_save, str_uuid,
                                             'original.jpg')
            cv2.imwrite(filename_original, image_original)
            filename_CAM = os.path.join(base_dir_save, str_uuid,
                                        'Grad_CAM{}.jpg'.format(pred_class))
            cv2.imwrite(filename_CAM, jetcam)

            import imageio
            mg_paths = [filename_original, filename_CAM]
            gif_images = []
            for path in mg_paths:
                gif_images.append(imageio.imread(path))
            img_file_gif = os.path.join(base_dir_save, str_uuid,
                                        'Grad_CAM{}.gif'.format(pred_class))
            imageio.mimsave(img_file_gif, gif_images, fps=gif_fps)
            return img_file_gif
        else:
            if blend_original_image:
                image_original = img_input[0, :]
                from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
                if norm_reverse:
                    image_original = input_norm_reverse(image_original)
                image_original = image_original.astype(np.uint8)

                jetcam = (np.float32(jetcam) + image_original) / 2

            filename_CAM = os.path.join(base_dir_save, str_uuid,
                                        'Grad_CAM{}.jpg'.format(pred_class))
            cv2.imwrite(filename_CAM, jetcam)

            return filename_CAM
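The private helper called above as self.__grad_cam(model, img_input, pred_class, layer_name) is not shown. A minimal sketch of such a helper using tf.GradientTape follows; it is a common Grad-CAM recipe under the assumption of a TensorFlow 2 Keras model, not necessarily the author's implementation, and the function name is hypothetical.

import cv2
import numpy as np
import tensorflow as tf

def grad_cam(model, img_input, pred_class, layer_name):
    """Return an (H, W) Grad-CAM heatmap in [0, 1] for a one-image batch."""
    # Model mapping the input to the target conv layer output and the predictions.
    grad_model = tf.keras.models.Model(
        inputs=model.inputs,
        outputs=[model.get_layer(layer_name).output, model.output])

    with tf.GradientTape() as tape:
        conv_output, predictions = grad_model(img_input)
        class_score = predictions[:, pred_class]

    # Gradient of the class score w.r.t. the conv feature maps,
    # averaged spatially to obtain one weight per channel.
    grads = tape.gradient(class_score, conv_output)
    weights = tf.reduce_mean(grads, axis=(1, 2))

    # Weighted sum of the feature maps, ReLU, resize to input size, normalise to [0, 1].
    cam = tf.reduce_sum(conv_output * weights[:, tf.newaxis, tf.newaxis, :], axis=-1)[0]
    cam = tf.nn.relu(cam).numpy()
    cam = cv2.resize(cam, (img_input.shape[2], img_input.shape[1]))
    return cam / (cam.max() + 1e-8)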
Example #2
def server_grad_cam(model_no,
                    img_source,
                    pred,
                    preprocess=True,
                    blend_original_image=True):

    model = dicts_models[model_no]['model_original']

    image_size = dicts_models[model_no]['image_size']

    if preprocess:
        img_preprocess = my_preprocess.do_preprocess(img_source, crop_size=384)
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_preprocess, image_shape=(image_size, image_size, 3))
    else:
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_source, image_shape=(image_size, image_size, 3))

    penultimate_layer = get_last_conv_layer_number(model)
    layer_idx = len(model.layers) - 1

    modifier = 'guided'  # options: None, 'guided', 'relu'
    # note: visualize_cam with guided backprop is noticeably slow
    grads = visualize_cam(model,
                          layer_idx,
                          filter_indices=[pred],
                          seed_input=img_input,
                          penultimate_layer_idx=penultimate_layer,
                          backprop_modifier=modifier)

    cam = cv2.applyColorMap(np.uint8(255 * grads), cv2.COLORMAP_JET)

    if blend_original_image:
        # Return to BGR [0..255] from the preprocessed image
        image_original = img_input[0, :]

        from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
        image_original = input_norm_reverse(image_original)
        image_original = image_original.astype(np.uint8)

        image_original -= np.min(image_original)
        image_original = np.minimum(image_original, 255)

        cam = np.float32(cam) + np.float32(image_original)
        cam = 255 * cam / np.max(cam)

    # the directory passed in is a web directory
    str_uuid = str(uuid.uuid1())
    filename_CAM = os.path.join(BASE_DIR_SAVE, str_uuid,
                                'Grad_CAM{}.jpg'.format(pred))

    if not os.path.exists(os.path.dirname(filename_CAM)):
        os.makedirs(os.path.dirname(filename_CAM))

    cv2.imwrite(filename_CAM, cam)

    return filename_CAM
Example #3
    def gen_integrated_gradients(self,
                                 img_input,
                                 pred_class,
                                 gen_gif=True,
                                 gif_fps=1,
                                 norm_reverse=True,
                                 base_dir_save='/tmp'):

        integrated_gradients = self.__get_integrated_gradients(
            img_input, pred_class).numpy()
        if integrated_gradients.ndim == 4:
            integrated_gradients = integrated_gradients[0]

        str_uuid = str(uuid.uuid1())
        os.makedirs(os.path.join(base_dir_save, str_uuid), exist_ok=True)

        integrated_gradients = np.mean(integrated_gradients,
                                       axis=-1)  # (299, 299, 3) -> (299, 299)
        integrated_gradients = np.maximum(integrated_gradients, 0)  # ReLU
        integrated_gradients /= np.max(integrated_gradients)  # heatmap in [0, 1]
        # cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)

        filename_gradient = os.path.join(
            base_dir_save, str_uuid,
            'integrated_gradients{}.jpg'.format(pred_class))
        cv2.imwrite(filename_gradient, integrated_gradients * 255)

        if gen_gif:
            image_original = img_input[0, :]
            from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
            if norm_reverse:
                image_original = input_norm_reverse(image_original)
            image_original = image_original.astype(np.uint8)

            filename_original = os.path.join(base_dir_save, str_uuid,
                                             'original.jpg')
            cv2.imwrite(filename_original, image_original)

            import imageio
            mg_paths = [filename_original, filename_gradient]
            gif_images = []
            for path in mg_paths:
                gif_images.append(imageio.imread(path))
            img_file_gif = os.path.join(
                base_dir_save, str_uuid,
                'integrated_gradients{}.gif'.format(pred_class))
            imageio.mimsave(img_file_gif, gif_images, fps=gif_fps)
            return img_file_gif

        else:
            return filename_gradient
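The private self.__get_integrated_gradients helper is also omitted. A minimal sketch under the assumptions of a TensorFlow 2 model and a black-image baseline; the function name and the step count are hypothetical. The caller above then averages over channels, applies ReLU and normalises the result to [0, 1].

import tensorflow as tf

def get_integrated_gradients(model, img_input, pred_class, steps=50):
    """Integrated gradients of the class score w.r.t. a one-image batch."""
    img = tf.convert_to_tensor(img_input, dtype=tf.float32)
    baseline = tf.zeros_like(img)  # assumed black-image baseline

    # Interpolate between the baseline and the input along the straight path.
    alphas = tf.reshape(tf.linspace(0.0, 1.0, steps + 1), (-1, 1, 1, 1))
    interpolated = baseline + alphas * (img - baseline)

    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        preds = model(interpolated)
        class_scores = preds[:, pred_class]
    grads = tape.gradient(class_scores, interpolated)

    # Trapezoidal approximation of the path integral, scaled by (input - baseline).
    avg_grads = tf.reduce_mean((grads[:-1] + grads[1:]) / 2.0, axis=0)
    return (img[0] - baseline[0]) * avg_grads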
Example #4
def server_gradcam_plusplus(model_no,
                            img_source,
                            pred,
                            preprocess=True,
                            blend_original_image=True):

    image_size = dicts_models[model_no]['image_size']

    if preprocess:
        img_preprocess = my_preprocess.do_preprocess(img_source, crop_size=384)
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_preprocess, image_shape=(image_size, image_size, 3))
    else:
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_source, image_shape=(image_size, image_size, 3))

    gradcamplus = grad_cam_plus(dicts_models[model_no]['model_original'],
                                img_input,
                                pred=pred,
                                image_size=image_size)

    cam = cv2.applyColorMap(np.uint8(255 * gradcamplus), cv2.COLORMAP_JET)

    if blend_original_image:
        # Return to BGR [0..255] from the preprocessed image
        image_original = img_input[0, :]

        from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
        image_original = input_norm_reverse(image_original)
        image_original = image_original.astype(np.uint8)

        image_original -= np.min(image_original)
        image_original = np.minimum(image_original, 255)

        cam = np.float32(cam) + np.float32(image_original)
        cam = 255 * cam / np.max(cam)

    #region save the CAM to a file

    # the directory passed in is a web directory
    str_uuid = str(uuid.uuid1())
    filename_CAM = os.path.join(BASE_DIR_SAVE, str_uuid,
                                'GradCAM_PlusPlus{}.jpg'.format(pred))

    if not os.path.exists(os.path.dirname(filename_CAM)):
        os.makedirs(os.path.dirname(filename_CAM))

    cv2.imwrite(filename_CAM, cam)

    return filename_CAM
Example #5
    def shap_deep_explainer(self,
                            model_no,
                            num_reference,
                            img_input,
                            ranked_outputs=1,
                            norm_reverse=True,
                            blend_original_image=False,
                            gif_fps=1,
                            base_dir_save='/tmp/DeepExplain',
                            check_additivity=False):

        #region mini-batch because of GPU memory limitation
        list_shap_values = []

        batch_size = self.dicts_models[model_no]['batch_size']
        split_times = math.ceil(num_reference / batch_size)
        for i in range(split_times):
            shap_values_tmp1 = self.list_e[model_no][i].shap_values(
                img_input,
                ranked_outputs=ranked_outputs,
                check_additivity=check_additivity)
            # shap_values_tmp1 with ranked_outputs:
            #   [0]: list of attribution arrays, each of shape (1, 299, 299, 3)
            #   [1]: array of predicted class indices
            shap_values_copy = copy.deepcopy(shap_values_tmp1)
            list_shap_values.append(shap_values_copy)

        # average the mini-batch SHAP values for every ranked output
        shap_values_results = copy.deepcopy(list_shap_values[0])
        for i in range(ranked_outputs):
            for j in range(len(list_shap_values)):
                if j == 0:
                    shap_values_tmp2 = list_shap_values[0][0][i]
                else:
                    shap_values_tmp2 += list_shap_values[j][0][i]
            shap_values_results[0][i] = shap_values_tmp2 / split_times

        #endregion

        #region save files
        str_uuid = str(uuid.uuid1())
        list_classes = []
        list_images = []
        for i in range(ranked_outputs):
            predict_class = int(
                shap_values_results[1][0][i])  # numpy int64 -> int
            list_classes.append(predict_class)

            save_filename = os.path.join(
                base_dir_save, str_uuid,
                'Shap_Deep_Explainer{}.jpg'.format(predict_class))
            os.makedirs(os.path.dirname(save_filename), exist_ok=True)
            list_images.append(save_filename)

        pred_class_num = len(shap_values_results[0])

        if blend_original_image:
            if norm_reverse:
                img_original = np.uint8(input_norm_reverse(img_input[0]))
            else:
                img_original = np.uint8(img_input[0])
            img_original_file = os.path.join(os.path.dirname(list_images[0]),
                                             'deepshap_original.jpg')
            cv2.imwrite(img_original_file, img_original)

        for i in range(pred_class_num):
            # predict_max_class = attributions[1][0][i]
            attribution1 = shap_values_results[0][i]

            #attributions.shape: (1, 299, 299, 3)
            data = attribution1[0]
            data = np.mean(data, -1)

            # symmetric colour limits around zero for the diverging colormap
            abs_max = np.percentile(np.abs(data), 100)
            abs_min = abs_max

            # dx, dy = 0.05, 0.05
            # xx = np.arange(0.0, data1.shape[1], dx)
            # yy = np.arange(0.0, data1.shape[0], dy)
            # xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
            # extent = xmin, xmax, ymin, ymax

            # cmap = 'RdBu_r'
            # cmap = 'gray'
            cmap = 'seismic'
            plt.axis('off')
            # plt.imshow(data1, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
            # plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)

            # fig = plt.gcf()
            # fig.set_size_inches(2.99 / 3, 2.99 / 3)  # dpi = 300, output = 700*700 pixels
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())
            plt.subplots_adjust(top=1,
                                bottom=0,
                                right=1,
                                left=0,
                                hspace=0,
                                wspace=0)
            plt.margins(0, 0)

            if blend_original_image:
                plt.imshow(data,
                           interpolation='none',
                           cmap=cmap,
                           vmin=-abs_min,
                           vmax=abs_max)
                save_filename1 = list_images[i]
                plt.savefig(save_filename1, bbox_inches='tight', pad_inches=0)
                plt.close()

                img_heatmap = cv2.imread(list_images[i])
                (tmp_height, tmp_width) = img_original.shape[:-1]
                img_heatmap = cv2.resize(img_heatmap, (tmp_width, tmp_height))
                img_heatmap_file = os.path.join(
                    os.path.dirname(list_images[i]),
                    'deepshap_{0}.jpg'.format(i))
                cv2.imwrite(img_heatmap_file, img_heatmap)

                dst = cv2.addWeighted(img_original, 0.65, img_heatmap, 0.35, 0)
                img_blend_file = os.path.join(
                    os.path.dirname(list_images[i]),
                    'deepshap_blend_{0}.jpg'.format(i))
                cv2.imwrite(img_blend_file, dst)

                #region create gif
                import imageio
                mg_paths = [
                    img_original_file, img_heatmap_file, img_blend_file
                ]
                gif_images = []
                for path in mg_paths:
                    gif_images.append(imageio.imread(path))
                img_file_gif = os.path.join(os.path.dirname(list_images[i]),
                                            'deepshap_{0}.gif'.format(i))
                imageio.mimsave(img_file_gif, gif_images, fps=gif_fps)
                list_images[i] = img_file_gif
                #endregion
            else:
                plt.imshow(data,
                           interpolation='none',
                           cmap=cmap,
                           vmin=-abs_min,
                           vmax=abs_max)
                save_filename1 = list_images[i]
                plt.savefig(save_filename1, bbox_inches='tight', pad_inches=0)
                plt.close()

        #endregion

        return list_classes, list_images
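The self.list_e explainers consumed above are constructed elsewhere. A hedged sketch of how one shap.DeepExplainer per mini-batch of reference images might be built; the function name and the way the background set is sliced are assumptions.

import math
import shap

def build_deep_explainers(model, background_images, batch_size):
    """One DeepExplainer per mini-batch of reference (background) images."""
    explainers = []
    split_times = math.ceil(len(background_images) / batch_size)
    for i in range(split_times):
        background = background_images[i * batch_size:(i + 1) * batch_size]
        explainers.append(shap.DeepExplainer(model, background))
    return explainers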
Example #6
def plot_heatmap_shap(attributions, list_images, img_input, blend_original_image):

    pred_class_num = len(attributions[0])

    if blend_original_image:
        from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
        img_original = np.uint8(input_norm_reverse(img_input[0]))
        import cv2
        img_original = cv2.resize(img_original, (384, 384))
        img_original_file = os.path.join(os.path.dirname(list_images[0]), 'deepshap_original.jpg')
        cv2.imwrite(img_original_file, img_original)

    for i in range(pred_class_num):
        # predict_max_class = attributions[1][0][i]
        attribution1 = attributions[0][i]

        #attributions.shape: (1, 299, 299, 3)
        data = attribution1[0]
        data = np.mean(data, -1)

        # symmetric colour limits around zero for the diverging colormap
        abs_max = np.percentile(np.abs(data), 100)
        abs_min = abs_max

        # dx, dy = 0.05, 0.05
        # xx = np.arange(0.0, data1.shape[1], dx)
        # yy = np.arange(0.0, data1.shape[0], dy)
        # xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
        # extent = xmin, xmax, ymin, ymax

        # cmap = 'RdBu_r'
        # cmap = 'gray'
        cmap = 'seismic'
        plt.axis('off')
        # plt.imshow(data1, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
        # plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)

        # fig = plt.gcf()
        # fig.set_size_inches(2.99 / 3, 2.99 / 3)  # dpi = 300, output = 700*700 pixels
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
        plt.margins(0, 0)

        if blend_original_image:
            # cv2.imwrite('/tmp5/tmp/cv2.jpg', np.uint8(img_input[0]))
            # img_original = cv2.cvtColor(np.uint8(img_input[0]), cv2.COLOR_BGR2RGB)
            # plt.imshow(img_original)

            plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
            save_filename1 = list_images[i]
            plt.savefig(save_filename1, bbox_inches='tight', pad_inches=0)
            plt.close()

            img_heatmap = cv2.imread(list_images[i])
            img_heatmap = cv2.resize(img_heatmap, (384, 384))
            img_heatmap_file = os.path.join(os.path.dirname(list_images[i]), 'deepshap_{0}.jpg'.format(i))
            cv2.imwrite(img_heatmap_file, img_heatmap)

            dst = cv2.addWeighted(img_original, 0.65, img_heatmap, 0.35, 0)
            # cv2.imwrite('/tmp5/tmp/aaaaa.jpg', dst) #test code
            img_blend_file = os.path.join(os.path.dirname(list_images[i]), 'deepshap_blend_{0}.jpg'.format(i))
            cv2.imwrite(img_blend_file, dst)

            # fig.savefig('/tmp5/tmp/aaa1.png', format='png', dpi=299,  transparent=True,  pad_inches=0)
            # plt.savefig('/tmp5/tmp/aaa.jpg', bbox_inches='tight', pad_inches=0)

            #region create gif
            import imageio
            mg_paths = [img_original_file, img_heatmap_file, img_blend_file]
            gif_images = []
            for path in mg_paths:
                gif_images.append(imageio.imread(path))
            img_file_gif = os.path.join(os.path.dirname(list_images[i]), 'deepshap_{0}.gif'.format(i))
            imageio.mimsave(img_file_gif, gif_images, fps=GIF_FPS)
            list_images[i] = img_file_gif
            #endregion
        else:
            plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
            save_filename1 = list_images[i]
            plt.savefig(save_filename1, bbox_inches='tight', pad_inches=0)
            plt.close()
Example #7
def server_cam(model_no,
               img_source,
               pred,
               cam_relu=True,
               preprocess=True,
               blend_original_image=True):

    image_size = dicts_models[model_no]['image_size']

    if preprocess:
        img_preprocess = my_preprocess.do_preprocess(img_source, crop_size=384)
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_preprocess, image_shape=(image_size, image_size, 3))
    else:
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_source, image_shape=(image_size, image_size, 3))

    #region generate CAM
    model1 = dicts_models[model_no]['model_cam']
    all_amp_layer_weights = dicts_models[model_no]['all_amp_layer_weights']

    last_conv_output, pred_vec = model1.predict(img_input)

    # pred = np.argmax(pred_vec)  # get model's prediction class
    # Remove single-dimensional entries from the shape of an array.
    last_conv_output = np.squeeze(last_conv_output)

    # get AMP layer weights
    amp_layer_weights = all_amp_layer_weights[:, pred]  # dim: (2048,)

    # jijie add relu
    # for each class C, the mean of every feature map K has one corresponding weight w
    if cam_relu:
        amp_layer_weights = np.maximum(amp_layer_weights, 0)

    cam_small = np.dot(last_conv_output, amp_layer_weights)  # spatial size of the last conv layer, e.g. 14 x 14
    cam_small = np.maximum(cam_small, 0)  # ReLU
    cam = cv2.resize(cam_small, (image_size, image_size))  # e.g. 14x14 -> 224x224
    heatmap = cam / np.max(cam)  # heatmap in [0, 1]
    # cam: 0-255
    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)

    if blend_original_image:
        # Return to BGR [0..255] from the preprocessed image
        image_original = img_input[0, :]

        from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
        image_original = input_norm_reverse(image_original)
        image_original = image_original.astype(np.uint8)

        image_original -= np.min(image_original)
        image_original = np.minimum(image_original, 255)

        cam = np.float32(cam) + np.float32(image_original)
        cam = 255 * cam / np.max(cam)

    #endregion

    #region save the CAM to a file

    str_uuid = str(uuid.uuid1())
    filename_CAM = os.path.join(BASE_DIR_SAVE, str_uuid,
                                'CAM{}.jpg'.format(pred))

    if not os.path.exists(os.path.dirname(filename_CAM)):
        os.makedirs(os.path.dirname(filename_CAM))

    cv2.imwrite(filename_CAM, cam)

    # endregion

    return filename_CAM
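model_cam and all_amp_layer_weights follow the standard CAM preparation: a model that exposes the last convolutional feature maps alongside the prediction, plus the kernel of the final dense layer, assuming the network ends with global average pooling followed by a single dense layer. A minimal sketch; the function name and the layer lookup by name are assumptions.

import tensorflow as tf

def prepare_cam_model(model, last_conv_layer_name):
    """Return (model_cam, all_amp_layer_weights) for class activation mapping."""
    last_conv = model.get_layer(last_conv_layer_name)
    model_cam = tf.keras.models.Model(
        inputs=model.inputs,
        outputs=[last_conv.output, model.output])
    # Dense-layer kernel, shape (n_channels, n_classes); column `pred` is used above.
    all_amp_layer_weights = model.layers[-1].get_weights()[0]
    return model_cam, all_amp_layer_weights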
Example #8
    def gen_heatmap(self,
                    img_input,
                    pred_class,
                    gen_gif=True,
                    gif_fps=1,
                    blend_original_image=True,
                    norm_reverse=True,
                    base_dir_save='/tmp'):

        # gradcamplus: 0-1
        gradcamplus = self.__grad_cam_plusplus(img_input,
                                               pred_class=pred_class)
        # cam: 0-255
        cam = cv2.applyColorMap(np.uint8(255 * gradcamplus), cv2.COLORMAP_JET)

        str_uuid = str(uuid.uuid1())
        os.makedirs(os.path.join(base_dir_save, str_uuid), exist_ok=True)

        if gen_gif:
            image_original = img_input[0, :]
            from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
            if norm_reverse:
                image_original = input_norm_reverse(image_original)
            image_original = image_original.astype(np.uint8)

            filename_original = os.path.join(base_dir_save, str_uuid,
                                             'original.jpg')
            cv2.imwrite(filename_original, image_original)

            filename_CAM = os.path.join(
                base_dir_save, str_uuid,
                'GradCAM_PlusPlus{}.jpg'.format(pred_class))
            cv2.imwrite(filename_CAM, cam)

            import imageio
            mg_paths = [filename_original, filename_CAM]
            gif_images = []
            for path in mg_paths:
                gif_images.append(imageio.imread(path))
            img_file_gif = os.path.join(
                base_dir_save, str_uuid,
                'GradCAM_PlusPlus{}.gif'.format(pred_class))
            imageio.mimsave(img_file_gif, gif_images, fps=gif_fps)
            return img_file_gif

        else:
            if blend_original_image:
                image_original = img_input[0, :]
                from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
                if norm_reverse:
                    image_original = input_norm_reverse(image_original)
                image_original = image_original.astype(np.uint8)

                image_original -= np.min(image_original)
                image_original = np.minimum(image_original, 255)

                cam = np.float32(cam) + np.float32(image_original)
                cam = 255 * cam / np.max(cam)

            filename_CAM = os.path.join(
                base_dir_save, str_uuid,
                'GradCAM_PlusPlus{}.jpg'.format(pred_class))
            cv2.imwrite(filename_CAM, cam)

            return filename_CAM
Example #9
    def gen_heatmap(self,
                    img_input,
                    pred_class,
                    cam_relu=True,
                    gen_gif=True,
                    gif_fps=1,
                    blend_original_image=True,
                    norm_reverse=True,
                    base_dir_save='/tmp'):

        height, width = img_input.shape[1], img_input.shape[2]

        last_conv_output = self.model_last_conv.predict(img_input)
        last_conv_output = np.squeeze(last_conv_output)

        # get AMP layer weights
        amp_layer_weights = self.all_amp_layer_weights[:,
                                                       pred_class]  # dim: (2048,)

        if cam_relu:
            amp_layer_weights = np.maximum(amp_layer_weights, 0)

        cam_small = np.dot(last_conv_output,
                           amp_layer_weights)  # spatial size of the last conv layer, e.g. 14 x 14
        cam = cv2.resize(cam_small, (width, height))  # e.g. 14x14 -> input size
        cam = np.maximum(cam, 0)  # ReLU
        heatmap = cam / np.max(cam)  # heatmap in [0, 1]

        # cam:0-255
        cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)

        str_uuid = str(uuid.uuid1())
        os.makedirs(os.path.join(base_dir_save, str_uuid), exist_ok=True)

        if gen_gif:
            image_original = img_input[0, :]
            from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
            if norm_reverse:
                image_original = input_norm_reverse(image_original)
            image_original = image_original.astype(np.uint8)

            filename_original = os.path.join(base_dir_save, str_uuid,
                                             'original.jpg')
            cv2.imwrite(filename_original, image_original)

            filename_CAM = os.path.join(base_dir_save, str_uuid,
                                        'CAM{}.jpg'.format(pred_class))
            cv2.imwrite(filename_CAM, cam)

            import imageio
            mg_paths = [filename_original, filename_CAM]
            gif_images = []
            for path in mg_paths:
                gif_images.append(imageio.imread(path))
            img_file_gif = os.path.join(base_dir_save, str_uuid,
                                        'CAM{}.gif'.format(pred_class))
            imageio.mimsave(img_file_gif, gif_images, fps=gif_fps)
            return img_file_gif

        else:
            if blend_original_image:
                image_original = img_input[0, :]
                from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
                if norm_reverse:
                    image_original = input_norm_reverse(image_original)
                image_original = image_original.astype(np.uint8)

                image_original -= np.min(image_original)
                image_original = np.minimum(image_original, 255)

                cam = np.float32(cam) + np.float32(image_original)
                cam = 255 * cam / np.max(cam)

            filename_CAM = os.path.join(base_dir_save, str_uuid,
                                        'CAM{}.jpg'.format(pred_class))
            cv2.imwrite(filename_CAM, cam)

        return filename_CAM

def server_gradcam_plusplus(dicts_models,
                            model_no,
                            img_source,
                            pred,
                            preprocess=True,
                            blend_original_image=True,
                            base_dir_save='/tmp3'):

    image_size = dicts_models[model_no]['image_size']

    # region preprocessing if needed, convert to input_tensor
    if isinstance(img_source, str):
        if preprocess:
            img_preprocess = my_preprocess.do_preprocess(img_source,
                                                         crop_size=384)
            img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
                img_preprocess, image_shape=(image_size, image_size, 3))
        else:
            img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
                img_source, image_shape=(image_size, image_size, 3))

    if isinstance(img_source, np.ndarray):
        if img_source.ndim == 4:
            img_input = img_source

    # endregion

    #region generating class activation maps
    # gradcamplus: 0-1
    gradcamplus = grad_cam_plus(dicts_models[model_no]['model_original'],
                                img_input,
                                pred=pred,
                                image_size=image_size)
    # cam: 0-255
    cam = cv2.applyColorMap(np.uint8(255 * gradcamplus), cv2.COLORMAP_JET)
    #endregion

    #region blend original image
    if blend_original_image:
        # Return to BGR [0..255] from the preprocessed image
        image_original = img_input[0, :]

        from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
        image_original = input_norm_reverse(image_original)
        image_original = image_original.astype(np.uint8)

        image_original -= np.min(image_original)
        image_original = np.minimum(image_original, 255)

        cam = np.float32(cam) + np.float32(image_original)
        cam = 255 * cam / np.max(cam)

    #endregion

    #region save the CAM to a file

    # the directory passed in is a web directory
    str_uuid = str(uuid.uuid1())
    filename_CAM = os.path.join(base_dir_save, str_uuid,
                                'GradCAM_PlusPlus{}.jpg'.format(pred))

    if not os.path.exists(os.path.dirname(filename_CAM)):
        os.makedirs(os.path.dirname(filename_CAM))

    cv2.imwrite(filename_CAM, cam)

    return filename_CAM
def server_grad_cam(dicts_models,
                    model_no,
                    img_source,
                    pred,
                    preprocess=True,
                    blend_original_image=True,
                    base_dir_save='/tmp3'):

    model = dicts_models[model_no]['model_original']

    image_size = dicts_models[model_no]['image_size']

    #region preprocessing if needed, convert to input_tensor

    if isinstance(img_source, str):
        if preprocess:
            img_preprocess = my_preprocess.do_preprocess(img_source,
                                                         crop_size=384)
            img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
                img_preprocess, image_shape=(image_size, image_size, 3))
        else:
            img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
                img_source, image_shape=(image_size, image_size, 3))
    if isinstance(img_source, np.ndarray):
        if img_source.ndim == 4:
            img_input = img_source

    #endregion

    #region generating class activation maps
    penultimate_layer = get_last_conv_layer_number(model)
    layer_idx = len(model.layers) - 1

    modifier = 'guided'  # options: None, 'guided', 'relu'
    # note: visualize_cam with guided backprop is noticeably slow
    grads = visualize_cam(model,
                          layer_idx,
                          filter_indices=[pred],
                          seed_input=img_input,
                          penultimate_layer_idx=penultimate_layer,
                          backprop_modifier=modifier)

    cam = cv2.applyColorMap(np.uint8(255 * grads), cv2.COLORMAP_JET)
    #endregion

    if blend_original_image:
        # Return to BGR [0..255] from the preprocessed image
        image_original = img_input[0, :]

        from LIBS.ImgPreprocess.my_image_norm import input_norm_reverse
        image_original = input_norm_reverse(image_original)
        image_original = image_original.astype(np.uint8)

        image_original -= np.min(image_original)
        image_original = np.minimum(image_original, 255)

        cam = np.float32(cam) + np.float32(image_original)
        cam = 255 * cam / np.max(cam)

    #region save CAMs to files
    str_uuid = str(uuid.uuid1())
    filename_CAM = os.path.join(base_dir_save, str_uuid,
                                'Grad_CAM{}.jpg'.format(pred))

    if not os.path.exists(os.path.dirname(filename_CAM)):
        os.makedirs(os.path.dirname(filename_CAM))

    cv2.imwrite(filename_CAM, cam)
    #endregion

    return filename_CAM
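A hypothetical call of the last helper; the dicts_models entry simply mirrors the keys accessed inside server_grad_cam, and the file paths are placeholders.

import tensorflow as tf

# Assumed structure: one dict per model with the keys read by server_grad_cam.
dicts_models = [{
    'model_original': tf.keras.models.load_model('/path/to/model.h5'),
    'image_size': 299,
}]

filename_cam = server_grad_cam(dicts_models,
                               model_no=0,
                               img_source='/path/to/image.jpg',
                               pred=1,
                               preprocess=True,
                               blend_original_image=True,
                               base_dir_save='/tmp3')
print(filename_cam)  # e.g. /tmp3/<uuid>/Grad_CAM1.jpg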