def set_layers(self):
    """Attach a dropout + softmax classifier head to the network.

    Takes the current model output, applies Dropout with rate
    ``self.dropout``, adds a Dense softmax layer with one unit per entry
    in ``self.classes``, and rebuilds ``self.model`` around the new head.
    No layers are frozen here.
    """
    head = self.model.output
    head = Dropout(self.dropout)(head)
    head = Dense(len(self.classes), activation='softmax')(head)
    self.model = kModel(self.model.input, head)
def compute_heatmap(self, image, eps=1e-8):
    """Compute a Grad-CAM heatmap for ``image``.

    Builds a gradient model that maps the network input to both the
    activations of the layer named ``self.layerName`` and the final
    softmax predictions, then uses guided gradients of the class score
    at ``self.classIdx`` to weight the feature maps.

    Args:
        image: batched input tensor/array; spatial size is read from
            ``image.shape[1]`` / ``image.shape[2]`` (assumes NHWC layout
            — TODO confirm against callers).
        eps: small constant to avoid division by zero when normalizing.

    Returns:
        A ``uint8`` heatmap array resized to the input's spatial
        dimensions, with values scaled to [0, 255].
    """
    # Build a model exposing (1) the target conv layer's 4D output and
    # (2) the softmax predictions.
    # FIX: ``self.model.inputs`` is already a list of input tensors, so
    # it must not be wrapped in another list — the original
    # ``inputs=[self.model.inputs]`` nested a list inside a list, which
    # recent TF/Keras versions reject.
    gradModel = kModel(
        inputs=self.model.inputs,
        outputs=[self.model.get_layer(self.layerName).output,
                 self.model.output])

    # Record operations for automatic differentiation.
    with tf.GradientTape() as tape:
        # Cast to float32, run the gradient model, and grab the score
        # of the class we want to explain.
        inputs = tf.cast(image, tf.float32)
        (convOutputs, predictions) = gradModel(inputs)
        loss = predictions[:, self.classIdx]

    # Gradients of the class score w.r.t. the conv feature maps.
    grads = tape.gradient(loss, convOutputs)

    # Guided gradients: zero out contributions where either the
    # activation or its gradient is non-positive.
    castConvOutputs = tf.cast(convOutputs > 0, "float32")
    castGrads = tf.cast(grads > 0, "float32")
    guidedGrads = castConvOutputs * castGrads * grads

    # Both tensors carry a batch dimension we don't need — keep only
    # the first (and only) sample's volume.
    convOutputs = convOutputs[0]
    guidedGrads = guidedGrads[0]

    # Per-channel weights are the spatial mean of the guided gradients;
    # the class activation map is the weighted sum over channels.
    weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
    cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)

    # Resize the CAM up to the input image's spatial dimensions.
    (w, h) = (image.shape[2], image.shape[1])
    heatmap = cv2.resize(cam.numpy(), (w, h))

    # Min-max normalize to [0, 1] (eps guards against a constant map),
    # then scale to [0, 255] as an unsigned 8-bit image.
    numer = heatmap - np.min(heatmap)
    denom = (heatmap.max() - heatmap.min()) + eps
    heatmap = numer / denom
    heatmap = (heatmap * 255).astype("uint8")
    return heatmap
def set_layers(self):
    """Set up the network layers for fine-tuning.

    Freezes every layer up to the last conv block, then attaches a new
    fully-connected classifier head (Flatten -> Dense(256, relu) ->
    Dropout(0.7) -> softmax over ``self.classes``) and rebuilds
    ``self.model`` around it.
    """
    # Freeze all layers up to the last conv block; rationale:
    # https://gist.github.com/fchollet/7eb39b44eb9e16e59632d25fb3119975#gistcomment-2068023
    for frozen in self.model.layers[:15]:
        frozen.trainable = False

    head = Flatten()(self.model.output)
    head = Dense(256, activation='relu')(head)
    head = Dropout(0.7)(head)
    head = Dense(len(self.classes), activation='softmax')(head)
    self.model = kModel(inputs=self.model.input, outputs=head)