def grad_cam(input_model, image, category_index, layer_name):
    """Compute a Grad-CAM heatmap and its JET overlay for one class.

    Args:
        input_model: Keras model whose output is a class-score tensor.
        image: preprocessed input batch of shape (1, H, W, C).
        category_index: index of the target class in the model output.
        layer_name: name of the conv layer whose activations are weighted.

    Returns:
        (overlay, heatmap): uint8 BGR overlay image and the float heatmap
        normalized to [0, 1].
    """
    # Target-class score and the chosen conv layer's activations.
    y_c = input_model.output[0, category_index]
    conv_output = input_model.get_layer(layer_name).output
    grads = k.gradients(y_c, conv_output)[0]
    gradient_function = k.function([input_model.input], [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # Channel weights = global-average-pooled gradients (Grad-CAM, eq. 1).
    weights = np.mean(grads_val, axis=(0, 1))
    # BUG FIX: the original initialized the map with np.ones, adding a
    # constant +1 offset to every pixel before ReLU/normalization and
    # biasing the heatmap. The weighted channel sum is also vectorized
    # (np.dot over the channel axis) instead of a per-channel Python loop.
    cam = np.dot(output, weights).astype(np.float32)

    # NOTE(review): img_width / img_height are module-level globals — confirm
    # they match the model's expected input size.
    cam = cv2.resize(cam, (img_width, img_height))
    cam = np.maximum(cam, 0)
    # BUG FIX: guard against an all-zero map to avoid division by zero.
    cam_max = np.max(cam)
    heatmap = cam / cam_max if cam_max > 0 else cam

    # Return to BGR [0..255] from the preprocessed image.
    # BUG FIX: copy before the in-place ops below — image[0, :] is a view,
    # so the original code silently mutated the caller's input array.
    image = image[0, :].copy()
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap
def compile_saliency_function(model, activation_layer=layer_name):
    """Compile a Keras function that maps an input image (plus the
    learning-phase flag) to the saliency of *activation_layer*: the
    gradient of the layer's channel-wise maximum w.r.t. the input.
    """
    input_img = model.input
    # Name -> layer lookup; layer 0 (the input layer) is skipped.
    layers_by_name = {layer.name: layer for layer in model.layers[1:]}
    target_output = layers_by_name[activation_layer].output
    # Strongest channel response at each spatial position.
    channel_max = k.max(target_output, axis=3)
    saliency = k.gradients(k.sum(channel_max), input_img)[0]
    return k.function([input_img, k.learning_phase()], [saliency])
def test_tf_batch_map_offsets_grad():
    """Gradient of tf_batch_map_offsets w.r.t. its input must not vanish."""
    np.random.seed(42)  # deterministic fixture
    # Renamed from `input` — the original shadowed the builtin.
    inputs = np.random.random((4, 100, 100))
    offsets = np.random.random((4, 100, 100, 2)) * 2

    inputs = K.variable(inputs)
    offsets = K.variable(offsets)

    tf_mapped_vals = tf_batch_map_offsets(inputs, offsets)
    grad = K.gradients(tf_mapped_vals, inputs)[0]
    grad = K.eval(grad)

    # An all-zero gradient would mean the op is not differentiable
    # w.r.t. its input values.
    assert not np.allclose(grad, 0)
# Neural-style-transfer loss setup: accumulate the style loss over five VGG
# conv1 feature layers (index 1 = style image features, index 2 = generated
# image features in each layer's batch), add a total-variation penalty, then
# compile a Keras function mapping the generated image to [loss, *gradients].
# NOTE(review): `eval_loss_and_grads` is truncated at the end of this chunk —
# its `else:` branch continues beyond the visible source.
feature_layer_names = [ 'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1' ] for name in feature_layer_names: layer_features = outputs_dict[name] style_features = layer_features[1, :, :, :] gen_img_features = layer_features[2, :, :, :] s1 = style_loss(style_features, gen_img_features) # We need to divide the loss by the number of layers that we take into account loss += (STYLE_WEIGHT / len(feature_layer_names)) * s1 loss += TV_WEIGHT * total_variation_loss(gen_img) # Calculate gradients grads = K.gradients(loss, gen_img) outputs = [loss] if isinstance(grads, (list, tuple)): outputs += grads else: outputs.append(grads) # Define a Keras function f_output = K.function([gen_img], outputs) def eval_loss_and_grads(x): if K.image_data_format() == 'channels_first': x = x.reshape((1, 3, img_h, img_w)) else: