def apply_gradcam(img_path):
    # read the image at its display resolution and build the network input
    original_img = cv2.imread(img_path)
    original_img = cv2.resize(original_img, (320, 240))
    image = preprocess_img(img_path)

    # predict and take the class index with the highest probability
    preds = model_predict(image)
    i = np.argmax(preds[0])

    # initialize our gradient class activation map and build the heatmap
    cam = GradCAM(TRAINED_MODEL, i)
    heatmap = cam.compute_heatmap(image)

    # resize the resulting heatmap to the original input image dimensions
    # and then overlay the heatmap on top of the image
    heatmap = cv2.resize(heatmap, (original_img.shape[1], original_img.shape[0]))
    heatmap_legend, heatmap, output = cam.overlay_heatmap(heatmap, original_img, alpha=0.2)

    # stack the legend, a thin white separator, the heatmap, and the overlay
    white_strip = 255 * np.ones((20, original_img.shape[1], 3), np.uint8)
    output = np.vstack([heatmap_legend, white_strip, heatmap, output])
    return output
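The function above leans on pieces that are not shown here: a preprocess_img/model_predict pair, a TRAINED_MODEL handle, and a GradCAM class whose overlay_heatmap also returns a colour-bar legend. Below is a minimal sketch of the missing helpers, assuming a Keras model saved to a hypothetical "model.h5" and a simple resize-and-scale preprocessing; the real input size, colour order, and normalisation must match whatever TRAINED_MODEL was actually trained with.

import cv2
import numpy as np
from tensorflow.keras.models import load_model

# hypothetical model path; the saved network defines the real input size
TRAINED_MODEL = load_model("model.h5")

def preprocess_img(img_path, target_size=(224, 224)):
    # read the image, convert BGR -> RGB, resize to the network's input size,
    # scale to [0, 1], and add a batch dimension -> shape (1, H, W, 3)
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, target_size)
    img = img.astype("float32") / 255.0
    return np.expand_dims(img, axis=0)

def model_predict(image):
    # forward pass; returns one row of class probabilities per batch item
    return TRAINED_MODEL.predict(image)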
# use the network to make predictions on the input image and find
# the class label index with the largest corresponding probability
preds = model.predict(image)
i = np.argmax(preds[0])

# decode the ImageNet predictions to obtain the human-readable label
decoded = imagenet_utils.decode_predictions(preds)
(imagenetID, label, prob) = decoded[0][0]
label = "{}: {:.2f}%".format(label, prob * 100)
print("[INFO] {}".format(label))

# initialize our gradient class activation map and build the heatmap
cam = GradCAM(model, i)
heatmap = cam.compute_heatmap(image)

# resize the resulting heatmap to the original input image dimensions
# and then overlay the heatmap on top of the image
heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
(heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5)

# draw the predicted label on the output image
cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 2)

# stack the original image, the heatmap, and the overlay vertically
# and display the result on screen
output = np.vstack([orig, heatmap, output])
output = imutils.resize(output, height=700)
cv2.imshow("Output", output)
cv2.waitKey(0)
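The block above assumes that model, image, and orig were prepared earlier in the script. A minimal sketch of that setup, assuming an ImageNet-pretrained ResNet50 and a hypothetical input path "example.jpg"; the actual network, input size, and preprocessing used by the script may differ.

import cv2
import numpy as np
from tensorflow.keras.applications import ResNet50, imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array, load_img

# assumed classifier; any Keras model with ImageNet-style outputs would work here
model = ResNet50(weights="imagenet")

# original BGR image kept for display, plus a preprocessed copy for the network
orig = cv2.imread("example.jpg")  # hypothetical path
image = load_img("example.jpg", target_size=(224, 224))
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)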
def heatmap(self):
    # map the string class names to the model's output indices
    labels = {'0': 0, '1': 1}
    class_labels = ["0 - No Hemorrhage", "1 - Hemorrhage"]

    # let the user pick an image through the Qt file dialog
    filename = QFileDialog.getOpenFileName()
    path = filename[0]

    # load the trained classifier and prepare the input exactly as during
    # training: 128x128 RGB, scaled to [0, 1], with a batch dimension
    new_model = load_model("trainn_5.h5")
    test_img_load = load_img(path, target_size=(128, 128))
    test_img = img_to_array(test_img_load)
    test_img = np.expand_dims(test_img, axis=0)
    test_img /= 255.0

    # predict the class and look up its human-readable label
    label_map_inv = {v: k for k, v in labels.items()}
    result = new_model.predict(test_img)
    prediction = int(result.argmax(axis=1)[0])
    i = label_map_inv[prediction]
    label = class_labels[int(i)]

    # compute the Grad-CAM heatmap for the predicted class and overlay it
    # on the original-resolution image
    orig = cv2.imread(path)
    cam = GradCAM(new_model, int(i))
    heatmap = cam.compute_heatmap(test_img)
    heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
    (heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.3)

    # place the original image and the overlay side by side, with a black
    # banner above them for the predicted label
    output = np.hstack([orig, output])
    banner = np.zeros((100, output.shape[1], 3), np.uint8)
    key = np.vstack([banner, output])
    key = imutils.resize(key, height=500)

    # green text for "no hemorrhage", red otherwise
    color = (0, 255, 0) if label == class_labels[0] else (0, 0, 255)
    cv2.putText(key, label, (key.shape[1] // 2 - 180, 40),
                cv2.FONT_HERSHEY_PLAIN, 2, color, 2)
    cv2.imshow("Output", key)
    cv2.waitKey(0)
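All three snippets depend on a GradCAM class with compute_heatmap and overlay_heatmap methods that is not shown. Below is a compact sketch of one possible TensorFlow 2 implementation matching the two-value overlay_heatmap return used in the last two snippets (the first snippet's variant additionally returns a legend strip, which this sketch omits); the automatic layer selection, the colormap, and the default parameters are assumptions.

import cv2
import tensorflow as tf
from tensorflow.keras.models import Model

class GradCAM:
    def __init__(self, model, classIdx, layerName=None):
        self.model = model
        self.classIdx = classIdx
        # default to the last layer with a 4D output (the last conv/pool block)
        self.layerName = layerName or next(
            layer.name for layer in reversed(model.layers)
            if len(layer.output.shape) == 4)

    def compute_heatmap(self, image, eps=1e-8):
        # model mapping the input to (conv feature maps, predictions)
        gradModel = Model(inputs=self.model.inputs,
                          outputs=[self.model.get_layer(self.layerName).output,
                                   self.model.output])
        with tf.GradientTape() as tape:
            convOutputs, predictions = gradModel(tf.cast(image, tf.float32))
            loss = predictions[:, self.classIdx]
        # gradients of the class score w.r.t. the conv feature maps,
        # averaged per channel to obtain the channel weights
        grads = tape.gradient(loss, convOutputs)
        weights = tf.reduce_mean(grads, axis=(1, 2))
        # weighted sum of the feature maps, followed by ReLU
        cam = tf.nn.relu(tf.reduce_sum(
            weights[:, tf.newaxis, tf.newaxis, :] * convOutputs, axis=-1))[0].numpy()
        # normalise to [0, 255] so the map can be colour-mapped
        cam = (cam - cam.min()) / (cam.max() - cam.min() + eps)
        return (cam * 255).astype("uint8")

    def overlay_heatmap(self, heatmap, image, alpha=0.5,
                        colormap=cv2.COLORMAP_JET):
        # colourise the heatmap and blend it with the original image
        heatmap = cv2.applyColorMap(heatmap, colormap)
        output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
        return (heatmap, output)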