import base64

import cv2
import numpy as np


def gradcam(model, img_orig, img_b, pred, conf, label_max):
    # build one Grad-CAM overlay (as a base64-encoded JPEG) per label whose
    # predicted probability exceeds the confidence threshold
    img_encs = []
    for lbl_index in range(label_max):
        if pred[lbl_index] > conf:
            cam = GradCAM(model, lbl_index)
            heatmap = cam.compute_heatmap(np.array([img_b]))
            # resize the heatmap to the original image dimensions and overlay it
            heatmap = cv2.resize(heatmap, (img_orig.shape[1], img_orig.shape[0]))
            (heatmap, output) = cam.overlay_heatmap(heatmap, img_orig, alpha=0.5)
            # encode the overlay as a base64 JPEG string
            _, out_enc = cv2.imencode(".jpg", output)
            out_enc = base64.b64encode(out_enc).decode("ascii")
            img_encs.append(out_enc)
        else:
            img_encs.append(None)
    return img_encs
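# A minimal sketch of how the gradcam() helper above might be called. The
# image path, input size, preprocessing, and confidence threshold below are
# assumptions for illustration, not taken from the original script; `model`
# is assumed to be an already-loaded multi-label Keras classifier.
img_orig = cv2.imread("example.jpg")                       # assumed input path
img_b = cv2.resize(img_orig, (224, 224)) / 255.0           # assumed model input size/scale
pred = model.predict(np.array([img_b]))[0]                 # per-label probabilities
encoded = gradcam(model, img_orig, img_b, pred, conf=0.5, label_max=len(pred))
# encoded[k] is a base64 JPEG overlay for label k, or None if below threshold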
# use the network to make predictions on the input image and find
# the class label index with the largest corresponding probability
preds = model.predict(image)
i = np.argmax(preds[0])

# decode the ImageNet predictions to obtain the human-readable label
decoded = imagenet_utils.decode_predictions(preds)
(imagenetID, label, prob) = decoded[0][0]
label = "{}: {:.2f}%".format(label, prob * 100)
print("[INFO] {}".format(label))

# initialize our gradient class activation map and build the heatmap,
# using the explicitly requested layer if one was supplied
if args["layer"] == "None":
    cam = GradCAM(model, i)
else:
    cam = GradCAM(model, i, args["layer"])
heatmap = cam.compute_heatmap(image)

# resize the resulting heatmap to the original input image dimensions
# and then overlay the heatmap on top of the image
heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
(heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5)

# draw the predicted label on the output image
cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 2)

# display the original image, resulting heatmap, and output image
# on our screen
output = np.hstack([orig, heatmap, output])
output = imutils.resize(output, height=400)
cv2.imshow("Output", output)
cv2.waitKey(0)
# build the model and (optionally) print its summary
visual_model = model_factory.get_model(FLAGS)
if FLAGS.show_model_summary:
    visual_model.summary()

# run the test set through the model one image at a time
FLAGS.batch_size = 1
test_generator = get_generator(FLAGS.test_csv, FLAGS)
images_names = test_generator.get_images_names()

for batch_i in tqdm(range(test_generator.steps)):
    batch, _ = test_generator.__getitem__(batch_i)
    image_path = os.path.join(FLAGS.image_directory, images_names[batch_i])
    original = cv2.imread(image_path)

    # predict the BI-RADS class for this image
    preds = visual_model.predict(batch)
    predicted_class = np.argmax(preds[0])
    label = f"Birad-{predicted_class + 1}"

    # compute the Grad-CAM heatmap for the predicted class and overlay it
    cam = GradCAM(visual_model, predicted_class)
    heatmap = cam.compute_heatmap(batch)
    heatmap = cv2.resize(heatmap, (original.shape[1], original.shape[0]))
    (heatmap, output) = cam.overlay_heatmap(heatmap, original, alpha=0.5)

    # draw the predicted label on the output image and save it
    cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
    cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
                0.8, (255, 255, 255), 2)
    cv2.imwrite(os.path.join(write_path, images_names[batch_i]), output)
# use the network to make predictions on the input image and find
# the class label index with the largest corresponding probability
preds = model.predict(image)
i = np.argmax(preds[0])
class_names = ['Opencountry', 'coast', 'forest', 'highway',
               'inside_city', 'mountain', 'street', 'tallbuilding']
label = class_names[i]
prob = np.max(preds[0])
label = "{}: {:.2f}%".format(label, prob * 100)
print("[INFO] {}".format(label))

# initialize our gradient class activation map and build the heatmap
cam = GradCAM(model, i)
heatmap = cam.compute_heatmap(image, normalize_grads=args["norm"])

# load the original image from disk (in OpenCV format)
orig = cv2.imread(args["image"])

# resize the resulting heatmap to the original input image dimensions
# and then overlay the heatmap on top of the image
heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
(heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5,
                                        colormap=color_maps[args["color"]])

# draw the predicted label on the output image
cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 2)

# display the original image and resulting heatmap and output image
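# All of the snippets above rely on a GradCAM helper class exposing
# compute_heatmap() and overlay_heatmap(); the class itself is not shown in
# this section. Below is a minimal sketch of such a class for
# TensorFlow/Keras, following the standard Grad-CAM recipe
# (Selvaraju et al.). The layer-search heuristic, guided-gradient masking,
# and normalization details are assumptions and may differ from the class
# actually used by these scripts.
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model


class GradCAM:
    def __init__(self, model, classIdx, layerName=None):
        self.model = model
        self.classIdx = classIdx
        # default to the last layer with a 4D output (a conv/pooling feature map)
        self.layerName = layerName or self.find_target_layer()

    def find_target_layer(self):
        for layer in reversed(self.model.layers):
            if len(layer.output_shape) == 4:
                return layer.name
        raise ValueError("Could not find a 4D layer; cannot apply Grad-CAM.")

    def compute_heatmap(self, image, eps=1e-8):
        # model mapping the input to the target conv layer's activations
        # and to the final predictions
        gradModel = Model(inputs=self.model.inputs,
                          outputs=[self.model.get_layer(self.layerName).output,
                                   self.model.output])
        with tf.GradientTape() as tape:
            inputs = tf.cast(image, tf.float32)
            (convOutputs, predictions) = gradModel(inputs)
            loss = predictions[:, self.classIdx]
        # gradient of the class score w.r.t. the conv layer's activations
        grads = tape.gradient(loss, convOutputs)
        # channel weights: spatial average of the (guided) gradients
        guided = (tf.cast(convOutputs > 0, "float32") *
                  tf.cast(grads > 0, "float32") * grads)
        weights = tf.reduce_mean(guided[0], axis=(0, 1))
        cam = tf.reduce_sum(weights * convOutputs[0], axis=-1).numpy()
        # resize to the network input size and normalize to 0-255
        cam = cv2.resize(cam, (image.shape[2], image.shape[1]))
        cam = (cam - cam.min()) / (cam.max() - cam.min() + eps)
        return (cam * 255).astype("uint8")

    def overlay_heatmap(self, heatmap, image, alpha=0.5,
                        colormap=cv2.COLORMAP_JET):
        # color the heatmap and blend it with the original image
        heatmap = cv2.applyColorMap(heatmap, colormap)
        output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
        return (heatmap, output)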