import numpy as np
from keras import backend as K


def euclidean_distance(vects):
    '''
    Auxiliary function to compute the Euclidean distance
    between two vectors in a Keras layer.
    '''
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))
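# Example (hedged sketch): one common way to wire euclidean_distance into a siamese
# model through a Lambda layer. The encoder architecture and the 128-dim input shape
# are illustrative assumptions; only the distance function itself comes from above.
from keras.layers import Input, Dense, Lambda
from keras.models import Model

inp_a = Input(shape=(128,))
inp_b = Input(shape=(128,))
shared = Dense(32, activation='relu')  # shared embedding layer
feat_a, feat_b = shared(inp_a), shared(inp_b)

# Lambda wraps the backend function; output has shape (batch, 1) because keepdims=True
distance = Lambda(euclidean_distance)([feat_a, feat_b])
siamese = Model([inp_a, inp_b], distance)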
def gradient_penalty(y_true, y_pred, interpolate, lamb):
    # Gradient of the critic output with respect to the interpolated samples
    grad = K.gradients(y_pred, interpolate)[0]
    # L2 norm of that gradient, summed over all non-batch axes
    norm = K.square(grad)
    norm_sum = K.sum(norm, axis=np.arange(1, len(norm.shape)))
    l2_norm = K.sqrt(norm_sum)
    # WGAN-GP penalty: lambda * (||grad||_2 - 1)^2, averaged over the batch
    gp_reg = lamb * K.square(1 - l2_norm)
    return K.mean(gp_reg)
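# Example (hedged sketch): how a WGAN-GP critic loss might use gradient_penalty.
# `critic`, `real_imgs` and `fake_imgs` are assumed tensors/models not defined here;
# the penalty is evaluated on random interpolations between real and generated samples,
# and lamb=10.0 is the value commonly used in the WGAN-GP paper.
batch_size = 32
alpha = K.random_uniform((batch_size, 1, 1, 1), 0.0, 1.0)
interpolate = alpha * real_imgs + (1.0 - alpha) * fake_imgs
pred_interp = critic(interpolate)

# Critic loss = Wasserstein estimate + lambda-weighted gradient penalty
critic_loss = (K.mean(critic(fake_imgs)) - K.mean(critic(real_imgs))
               + gradient_penalty(None, pred_interp, interpolate, lamb=10.0))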
def __init__(self, input_tensor, losses, input_range=(0, 255), wrt_tensor=None, norm_grads=True):
    """Creates an optimizer that minimizes a weighted loss function.

    Args:
        input_tensor: An input tensor of shape: `(samples, channels, image_dims...)`
            if `image_data_format=channels_first` or `(samples, image_dims..., channels)`
            if `image_data_format=channels_last`.
        losses: List of ([Loss](vis.losses#Loss), weight) tuples.
        input_range: Specifies the input range as a `(min, max)` tuple. This is used to
            rescale the final optimized input to the given range. (Default value=(0, 255))
        wrt_tensor: Short for, with respect to. This instructs the optimizer that the
            aggregate loss from `losses` should be minimized with respect to `wrt_tensor`.
            `wrt_tensor` can be any tensor that is part of the model graph. Default value
            is set to None, which means that the loss will simply be minimized with respect
            to `input_tensor`.
        norm_grads: True to normalize gradients. Normalization avoids very small or large
            gradients and ensures a smooth gradient descent process. If you want the actual
            gradients (for example, for visualizing attention), set this to False.
    """
    self.input_tensor = input_tensor
    self.input_range = input_range
    self.loss_names = []
    self.loss_functions = []
    self.wrt_tensor = self.input_tensor if wrt_tensor is None else wrt_tensor

    overall_loss = None
    for loss, weight in losses:
        # Perf optimization. Don't build loss function with 0 weight.
        if weight != 0:
            loss_fn = weight * loss.build_loss()
            overall_loss = loss_fn if overall_loss is None else overall_loss + loss_fn
            self.loss_names.append(loss.name)
            self.loss_functions.append(loss_fn)

    # Compute gradient of the overall loss with respect to the `wrt` tensor.
    grads = K.gradients(overall_loss, self.wrt_tensor)[0]
    if norm_grads:
        grads = grads / (K.sqrt(K.mean(K.square(grads))) + K.epsilon())

    # The main function to compute various quantities in the optimization loop.
    self.compute_fn = K.function([self.input_tensor, K.learning_phase()],
                                 self.loss_functions + [overall_loss, grads, self.wrt_tensor])
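# Example (hedged sketch): keras-vis style usage of this Optimizer for activation
# maximization. `model` is assumed to be a trained Keras model, the filter index is
# illustrative, and the exact Optimizer/minimize API may differ between keras-vis versions.
from vis.losses import ActivationMaximization
from vis.optimizer import Optimizer

losses = [(ActivationMaximization(model.layers[-1], filter_indices=20), 1.0)]
opt = Optimizer(model.input, losses)
result = opt.minimize(max_iter=200, verbose=True)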
def generate_pattern(model, layer_name, filter_index, steps, learning_rate, size=224):
    layer_output = model.get_layer(layer_name).output
    # loss: mean activation of the chosen filter in the chosen layer
    loss = K.mean(layer_output[:, :, :, filter_index])

    # obtain the gradient of the loss with respect to the model's input image
    grads_list = K.gradients(loss, model.input)
    grads = grads_list[0]

    # gradient normalization trick (EPSILON is a small constant such as K.epsilon())
    grads /= (K.sqrt(K.mean(K.square(grads))) + EPSILON)

    # fetch loss and normalized gradients for a given input
    iterate = K.function(inputs=[model.input], outputs=[loss, grads])

    # loss maximization via gradient ascent
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128  # start from gray image with random noise
    for i in range(steps):
        loss_value, grads_value = iterate([input_img_data])
        print('@{:-4d}: {:.4f}'.format(i, loss_value))
        # gradient ascent: adjust the input image in the direction that maximizes the loss
        input_img_data += grads_value * learning_rate

    img_tensor = input_img_data[0]
    return tensor_to_image(img_tensor)  # tensor_to_image: external helper that converts the array to an image
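# Example (hedged sketch): visualizing one filter of a pretrained VGG16 with
# generate_pattern. EPSILON and tensor_to_image are assumed to be defined alongside
# the function above; the layer name, filter index and hyperparameters are illustrative.
from keras.applications import VGG16

model = VGG16(weights='imagenet', include_top=False)
pattern = generate_pattern(model, layer_name='block3_conv1', filter_index=0,
                           steps=40, learning_rate=1.0, size=224)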
def euclidean_distance(y_true, y_pred):
    # Per-sample Euclidean distance, usable directly as a Keras loss or metric
    return K.sqrt(K.sum(K.square(y_true - y_pred), axis=1))
def normalize(x):
    # Scale a tensor by its RMS value; the small constant avoids division by zero
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)