Example #1
from keras import backend as K

def kl_cross_div_loss(y_true, y_pred):
    # `instancing_tensor`, `num_lines`, `mask_equal`, `mask_not_equal` and
    # `margin` come from the enclosing scope (see the sketch below).
    repeated_pred = K.repeat_elements(instancing_tensor, num_lines, 1)
    # Row view and column view of the same tensor, so every instance pair
    # (i, j) can be compared.
    h_pred = K.permute_dimensions(repeated_pred, (0, 1, 2, 3))
    v_pred = K.permute_dimensions(repeated_pred, (0, 2, 1, 3))
    # Pairwise KL term: h * log(h / v).
    d = h_pred * K.log(h_pred / v_pred)
    # Contrastive combination: keep equal pairs close, push unequal pairs
    # at least `margin` apart.
    loss_layer = mask_equal * d + mask_not_equal * K.maximum(
        0., margin - d)
    return K.identity(K.sum(loss_layer, axis=3) / 32.,
                      name='kl_cross_div_loss')
Example #2
    def __init__(self,
                 input_tensor,
                 losses,
                 input_range=(0, 255),
                 wrt_tensor=None,
                 norm_grads=True):
        """Creates an optimizer that minimizes weighted loss function.

        Args:
            input_tensor: An input tensor of shape: `(samples, channels, image_dims...)` if `image_data_format=
                channels_first` or `(samples, image_dims..., channels)` if `image_data_format=channels_last`.
            losses: List of ([Loss](vis.losses#Loss), weight) tuples.
            input_range: Specifies the input range as a `(min, max)` tuple. This is used to rescale the
                final optimized input to the given range. (Default value=(0, 255))
            wrt_tensor: Short for 'with respect to'. This instructs the optimizer that the aggregate loss from `losses`
                should be minimized with respect to `wrt_tensor`.
                `wrt_tensor` can be any tensor that is part of the model graph. The default value of None
                means that the loss will simply be minimized with respect to `input_tensor`.
            norm_grads: True to normalize gradients. Normalization avoids very small or large gradients and ensures
                a smooth gradient descent process. If you want the actual gradients
                (for example, for visualizing attention), set this to False.
        """
        self.input_tensor = input_tensor
        self.input_range = input_range
        self.loss_names = []
        self.loss_functions = []
        self.wrt_tensor = self.input_tensor if wrt_tensor is None else wrt_tensor
        if self.input_tensor is self.wrt_tensor:
            self.wrt_tensor_is_input_tensor = True
            self.wrt_tensor = K.identity(self.wrt_tensor)
        else:
            self.wrt_tensor_is_input_tensor = False

        overall_loss = None
        for loss, weight in losses:
            # Performance optimization: don't build a loss function with zero weight.
            if weight != 0:
                loss_fn = weight * loss.build_loss()
                overall_loss = loss_fn if overall_loss is None else overall_loss + loss_fn
                self.loss_names.append(loss.name)
                self.loss_functions.append(loss_fn)

        # Compute the gradient of the overall loss with respect to the `wrt` tensor.
        if self.wrt_tensor_is_input_tensor:
            grads = K.gradients(overall_loss, self.input_tensor)[0]
        else:
            grads = K.gradients(overall_loss, self.wrt_tensor)[0]
        if norm_grads:
            grads = K.l2_normalize(grads)

        # The main function to compute various quantities in optimization loop.
        self.compute_fn = K.function(
            [self.input_tensor, K.learning_phase()],
            self.loss_functions + [overall_loss, grads, self.wrt_tensor])
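This constructor matches the `Optimizer` class from keras-vis. A minimal usage sketch, assuming keras-vis is installed; the layer and filter indices are placeholders, and the exact return values of `minimize` may vary between versions:

from keras.applications import VGG16
from vis.losses import ActivationMaximization
from vis.optimizer import Optimizer

model = VGG16(weights='imagenet', include_top=True)
# Maximize the activation of one output neuron (index 20 is arbitrary).
losses = [(ActivationMaximization(model.layers[-1], 20), 1.)]
opt = Optimizer(model.input, losses, input_range=(0, 255))
# minimize() runs the loop driven by the compute_fn built in __init__ above.
img, grads, wrt_value = opt.minimize(max_iter=200, verbose=False)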
Example #3
def tn_over_gt_n(y_true, y_pred):
    # `true_negatives` and `num_not_equal` come from the enclosing scope
    # (see the sketch after example #4).
    return K.identity(true_negatives / num_not_equal, name="tn_over_gt_n")
Example #4
def tn_over_pred_n(y_true, y_pred):
    # `true_negatives` and `num_negatives` come from the enclosing scope
    # (see the sketch below).
    return K.identity(true_negatives / num_negatives,
                      name="tn_over_pred_n")
Example #5
def _apply_map(self, x):
    # Default mapping: return the input unchanged.
    return K.identity(x)
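In context, a `_apply_map` like this is typically a hook that subclasses override with a real transformation; returning `K.identity(x)` keeps the default a graph-friendly no-op.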
Example #6
    def _initialize_params(self, model, use_logits, input_layer, output_layer):
        """
        Initialize most parameters of the classifier. This is a convenience function called by `__init__` and
        `__setstate__` to avoid code duplication.

        :param model: Keras model
        :type model: `keras.models.Model`
        :param use_logits: True if the output of the model is logits.
        :type use_logits: `bool`
        :param input_layer: Which layer to consider as the input when the model has multiple input layers.
        :type input_layer: `int`
        :param output_layer: Which layer to consider as the output when the model has multiple output layers.
        :type output_layer: `int`
        """
        # pylint: disable=E0401
        if self.is_tensorflow:
            import tensorflow as tf
            if tf.executing_eagerly():
                raise ValueError(
                    'TensorFlow is executing eagerly. Please disable eager execution.'
                )
            import tensorflow.keras as keras
            import tensorflow.keras.backend as k
        else:
            import keras
            import keras.backend as k

        if hasattr(model, 'inputs'):
            self._input_layer = input_layer
            self._input = model.inputs[input_layer]
        else:
            self._input = model.input
            self._input_layer = 0

        if hasattr(model, 'outputs'):
            self._output = model.outputs[output_layer]
            self._output_layer = output_layer
        else:
            self._output = model.output
            self._output_layer = 0

        _, self._nb_classes = k.int_shape(self._output)
        self._input_shape = k.int_shape(self._input)[1:]
        logger.debug(
            'Inferred %i classes and %s as input shape for Keras classifier.',
            self.nb_classes(), str(self.input_shape))

        # Get predictions and loss function
        self._use_logits = use_logits
        if not hasattr(self._model, 'loss'):
            logger.warning(
                'Keras model has no loss set. The classifier will default to `k.sparse_categorical_crossentropy`.'
            )
            loss_function = k.sparse_categorical_crossentropy
        else:
            if isinstance(self._model.loss, six.string_types):
                loss_function = getattr(k, self._model.loss)
            elif self._model.loss.__name__ in [
                    'categorical_hinge', 'kullback_leibler_divergence',
                    'cosine_proximity'
            ]:
                if self.is_tensorflow and self._model.loss.__name__ == 'cosine_proximity':
                    loss_function = tf.keras.losses.cosine_similarity
                else:
                    loss_function = getattr(keras.losses,
                                            self._model.loss.__name__)
            else:
                loss_function = getattr(k, self._model.loss.__name__)

        if loss_function.__name__ in [
                'categorical_hinge', 'categorical_crossentropy',
                'binary_crossentropy', 'kullback_leibler_divergence',
                'cosine_proximity'
        ]:
            self._reduce_labels = False
            label_ph = k.placeholder(shape=self._output.shape)
        elif loss_function.__name__ in ['sparse_categorical_crossentropy']:
            self._reduce_labels = True
            label_ph = k.placeholder(shape=[
                None,
            ])
        else:
            raise ValueError('Loss function not recognised.')

        # The implementation of categorical_crossentropy is different in keras and tensorflow.keras. To ensure
        # consistent behavior of `KerasClassifier` for keras and tensorflow.keras we follow the approach of
        # tensorflow.keras for all cases of `from_logits` if the loss_function is categorical_crossentropy.
        if hasattr(self._model, 'loss') and isinstance(self._model.loss, six.string_types) \
                and loss_function.__name__ == 'categorical_crossentropy':
            predictions = self._output
            loss_ = loss_function(label_ph,
                                  self._output.op.inputs[-1],
                                  from_logits=True)
        elif loss_function.__name__ in [
                'categorical_hinge', 'cosine_proximity',
                'kullback_leibler_divergence'
        ]:
            predictions = self._output
            loss_ = loss_function(label_ph, self._output.op.inputs[-1])
        else:
            predictions = self._output
            loss_ = loss_function(label_ph,
                                  self._output,
                                  from_logits=use_logits)

        # Recent TensorFlow versions do not allow a model whose output is the same tensor as its input.
        if predictions == self._input:
            predictions = k.identity(predictions)

        loss_gradients = k.gradients(loss_, self._input)
        if k.backend() == 'tensorflow':
            loss_gradients = loss_gradients[0]
        elif k.backend() == 'cntk':
            raise NotImplementedError(
                'Only TensorFlow and Theano support is provided for Keras.')

        # Set loss, gradients and prediction functions
        self._predictions_op = predictions
        self._loss = loss_
        self._loss_gradients = k.function([self._input, label_ph],
                                          [loss_gradients])
        self._predictions = k.function([self._input], [predictions])

        # Get the internal layer
        self._layer_names = self._get_layers()
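This method appears to come from the Adversarial Robustness Toolbox's `KerasClassifier`, where it is invoked from `__init__`. A hypothetical usage sketch; the import path and constructor signature have changed across ART versions:

import keras
from keras.models import Sequential
from keras.layers import Dense
from art.classifiers import KerasClassifier  # path varies by ART version

model = Sequential([
    Dense(128, activation='relu', input_shape=(784,)),
    Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam')

# __init__ calls _initialize_params, which builds the label placeholder,
# the loss-gradient function, and the prediction function shown above.
classifier = KerasClassifier(model=model, use_logits=False)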
Example #7
def identity(x):
    return K.identity(x)
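Every example on this page ultimately calls `K.identity`, which returns a tensor with the same content as its input and, as used above, mainly serves to attach a readable name to the result. A quick self-contained check:

import numpy as np
from keras import backend as K

x = K.constant(np.arange(4.0))
y = K.identity(x, name='copy_of_x')
print(K.eval(y))  # -> [0. 1. 2. 3.]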