Example #1
def gradcam(model, img_orig, img_b, pred, conf, label_max):
    # build a Grad-CAM overlay for every class whose prediction score
    # exceeds the confidence threshold; return one base64-encoded JPEG
    # string (or None) per class index
    img_encs = []
    for lbl_index in range(label_max):
        if pred[lbl_index] > conf:
            # compute the class activation heatmap for this class
            cam = GradCAM(model, lbl_index)
            heatmap = cam.compute_heatmap(np.array([img_b]))

            # resize the heatmap to the original image dimensions and
            # blend it on top of the original image
            heatmap = cv2.resize(heatmap,
                                 (img_orig.shape[1], img_orig.shape[0]))
            (heatmap, output) = cam.overlay_heatmap(heatmap,
                                                    img_orig,
                                                    alpha=0.5)

            # JPEG-encode the overlay and convert it to a base64 string
            _, out_enc = cv2.imencode(".jpg", output)
            out_enc = base64.b64encode(out_enc).decode('ascii')
            img_encs.append(out_enc)
        else:
            img_encs.append(None)
    return img_encs
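
For context, one way the gradcam() helper above might be called is sketched below. The model choice, file name, and preprocessing are illustrative assumptions, not part of the original example; the helper and the GradCAM class it uses are taken as already defined in scope.

# Hypothetical usage of the gradcam() helper above; the model, file name,
# and preprocessing here are assumptions for illustration only.
import cv2
import numpy as np
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input

model = VGG16(weights="imagenet")
orig = cv2.imread("example.jpg")                       # BGR image for display
resized = cv2.resize(orig, (224, 224))
img_b = preprocess_input(
    cv2.cvtColor(resized, cv2.COLOR_BGR2RGB).astype("float32"))
pred = model.predict(np.array([img_b]))[0]             # per-class scores

# one base64-encoded JPEG per class scoring above the threshold, else None
overlays = gradcam(model, orig, img_b, pred, conf=0.5, label_max=len(pred))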
Example #2
# decode the ImageNet predictions to obtain the human-readable label
decoded = imagenet_utils.decode_predictions(preds)
(imagenetID, label, prob) = decoded[0][0]
label = "{}: {:.2f}%".format(label, prob * 100)
print("[INFO] {}".format(label))

# initialize our gradient class activation map for the top predicted
# class index i (e.g. i = np.argmax(preds[0])) and build the heatmap
if args['layer'] == 'None':
    cam = GradCAM(model, i)
else:
    cam = GradCAM(model, i, args['layer'])

heatmap = cam.compute_heatmap(image)

# resize the resulting heatmap to the original input image dimensions
# and then overlay heatmap on top of the image
heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
(heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5)

# draw the predicted label on the output image
cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
            (255, 255, 255), 2)

# display the original image and resulting heatmap and output image
# to our screen
output = np.hstack([orig, heatmap, output])
output = imutils.resize(output, height=400)
cv2.imshow("Output", output)
cv2.waitKey(0)
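
All three examples rely on a GradCAM helper class (a constructor taking a model, a class index, and an optional layer name, plus compute_heatmap() and overlay_heatmap() methods) that is defined elsewhere in their respective projects. The sketch below is one possible TensorFlow 2 / Keras implementation of that interface, written as an illustration rather than the original code; the normalize_grads option used in Example #3 is a project-specific extension not reproduced here.

# Illustrative GradCAM helper (assumption: TensorFlow 2.x / Keras model);
# the classes used by the examples on this page may differ in detail.
import cv2
import numpy as np
import tensorflow as tf


class GradCAM:
    def __init__(self, model, class_idx, layer_name=None):
        self.model = model
        self.class_idx = class_idx
        # default to the last layer with a 4D output (i.e. the last
        # convolutional feature map) when no layer name is given
        self.layer_name = layer_name or next(
            layer.name for layer in reversed(model.layers)
            if len(layer.output.shape) == 4)

    def compute_heatmap(self, image, eps=1e-8):
        # model that maps the input to the chosen feature map and the output
        grad_model = tf.keras.models.Model(
            inputs=self.model.inputs,
            outputs=[self.model.get_layer(self.layer_name).output,
                     self.model.output])

        with tf.GradientTape() as tape:
            conv_out, preds = grad_model(tf.cast(image, tf.float32))
            loss = preds[:, self.class_idx]

        # channel weights = global average of the gradients of the class
        # score with respect to the feature map
        grads = tape.gradient(loss, conv_out)
        weights = tf.reduce_mean(grads, axis=(1, 2))
        cam = tf.reduce_sum(
            weights[:, tf.newaxis, tf.newaxis, :] * conv_out, axis=-1)[0]

        # ReLU, normalize to [0, 1], and scale to an 8-bit heatmap
        cam = np.maximum(cam.numpy(), 0)
        cam = (cam - cam.min()) / (cam.max() - cam.min() + eps)
        return (cam * 255).astype("uint8")

    def overlay_heatmap(self, heatmap, image, alpha=0.5,
                        colormap=cv2.COLORMAP_VIRIDIS):
        # colorize the heatmap and blend it with the input image
        heatmap = cv2.applyColorMap(heatmap, colormap)
        output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
        return (heatmap, output)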
Example #3
# grab the probability of the top prediction; 'label' is assumed to have
# been decoded earlier in the script, as in Example #2
prob = np.max(preds[0])

label = "{}: {:.2f}%".format(label, prob * 100)
print("[INFO] {}".format(label))

# initialize our gradient class activation map and build the heatmap
cam = GradCAM(model, i)
heatmap = cam.compute_heatmap(image, normalize_grads=args["norm"])

# load the original image from disk (in OpenCV format)
orig = cv2.imread(args["image"])

# resize the resulting heatmap to the original input image dimensions
# and then overlay heatmap on top of the image
heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
(heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5,
                                        colormap=color_maps[args["color"]])

# draw the predicted label on the output image
cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
            (255, 255, 255), 2)

# stack the original image, heatmap, and output visualization vertically
# so they can be saved as a single image
output = np.vstack([orig, heatmap, output])
output = imutils.resize(output, height=700)

# save the visualization to disk if an output path was supplied
output_file = args["output"]
if output_file is not None:
    cv2.imwrite(output_file, output)
    print("[INFO] activation map saved to {}".format(output_file))
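
Example #3 also references an args dictionary and a color_maps lookup that are built elsewhere in its script. A minimal sketch of what that setup might look like is given below; only the argument names ("image", "norm", "color", "output") are inferred from the usage above, and the flags, defaults, and colormap choices are assumptions.

# Hypothetical command-line setup for Example #3; only the argument names
# are taken from the usage above, everything else is an assumption.
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
ap.add_argument("-n", "--norm", action="store_true",
                help="normalize gradients when computing the heatmap")
ap.add_argument("-c", "--color", default="viridis",
                help="name of the OpenCV colormap to use for the overlay")
ap.add_argument("-o", "--output", default=None,
                help="optional path to save the visualization to")
args = vars(ap.parse_args())

# map human-readable names to OpenCV colormap constants
color_maps = {
    "viridis": cv2.COLORMAP_VIRIDIS,
    "jet": cv2.COLORMAP_JET,
    "hot": cv2.COLORMAP_HOT,
}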