Example #1
    def evaluate(self, data, fn_inverse=None, horizon=1, fn_plot=None):
        """
        Evaluate model
        :return:
        """
        encoder_input_data, decoder_input_exog, y = data

        y_hat = self.predict(encoder_inputs=encoder_input_data,
                             pred_steps=horizon,
                             decoder_input_exog=decoder_input_exog)

        if fn_inverse is not None:
            y = fn_inverse(y)
            y_hat = fn_inverse(y_hat)

        y = np.float32(y)
        y_hat = np.float32(y_hat)

        if fn_plot is not None:
            fn_plot([y, y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results
Example #2
 def _extract_metrics(self):
     # We want a dictionary that maps an output tensor id to a list of metrics.
     metrics = self._model.metrics
     if metrics is None:
         metrics = {}
     if not isinstance(metrics, dict):
         metrics = {
             self._output_specs[i].identifier: metrics
             for i in range(len(self._output_specs))
         }
     else:
         # Keras stores dicts that map an output layer name to a (list of) metric(s).
         # We want an output tensor id.
         temp = {}
         for layer_name, metric in metrics.items():
             if isinstance(metric, str) or callable(metric):
                 # normalize to list
                 metric = [metric]
             for tensor_id in self._output_layer_tensor_ids[layer_name]:
                 temp[tensor_id] = metric
         metrics = temp
         import keras.metrics as km
         for tensor, metric in metrics.items():
             for idx, met in enumerate(metric):
                 metrics[tensor][idx] = km.get(
                     met
                 ).__name__ if met != 'accuracy' and met != 'acc' else 'acc'
     return metrics
Example #3
    def evaluate(self, inputs, fn_inverse=None, fn_plot=None):
        try:
            X, y = inputs
        except ValueError:
            X, y, _ = inputs
        try:
            X, exogenous = X
        except (TypeError, ValueError):
            exogenous = None
        y_hat = self.predict(X, exogenous)

        if fn_inverse is not None:
            y_hat = fn_inverse(y_hat)
            y = fn_inverse(y)

        if fn_plot is not None:
            fn_plot([y, y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results
Example #4
    def _get_metric_object(self, metric, y_t, y_p):
        """Converts user-supplied metric to a `Metric` object.

        Args:
          metric: A string, function, or `Metric` object.
          y_t: Sample of label.
          y_p: Sample of output.

        Returns:
          A `Metric` object.
        """
        if metric is None:
            return None  # Ok to have no metric for an output.

        # Convenience feature for selecting b/t binary, categorical,
        # and sparse categorical.
        if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
            metric_obj = metrics_mod.get(metric)
        else:
            y_t_rank = len(y_t.shape.as_list())
            y_p_rank = len(y_p.shape.as_list())
            y_t_last_dim = y_t.shape.as_list()[-1]
            y_p_last_dim = y_p.shape.as_list()[-1]

            is_binary = y_p_last_dim == 1
            is_sparse_categorical = (y_t_rank < y_p_rank
                                     or y_t_last_dim == 1 and y_p_last_dim > 1)

            if metric in ['accuracy', 'acc']:
                if is_binary:
                    metric_obj = metrics_mod.binary_accuracy
                elif is_sparse_categorical:
                    metric_obj = metrics_mod.sparse_categorical_accuracy
                else:
                    metric_obj = metrics_mod.categorical_accuracy
            else:
                if is_binary:
                    metric_obj = metrics_mod.binary_crossentropy
                elif is_sparse_categorical:
                    metric_obj = metrics_mod.sparse_categorical_crossentropy
                else:
                    metric_obj = metrics_mod.categorical_crossentropy

        if isinstance(metric_obj, losses_mod.Loss):
            metric_obj._allow_sum_over_batch_size = True  # pylint: disable=protected-access

        if not isinstance(metric_obj, metrics_mod.Metric):
            if isinstance(metric, six.string_types):
                metric_name = metric
            else:
                metric_name = get_custom_object_name(metric)
                if metric_name is None:
                    raise ValueError(
                        'Metric should be a callable, found: {}'.format(
                            metric))

            metric_obj = metrics_mod.MeanMetricWrapper(metric_obj,
                                                       name=metric_name)

        return metric_obj
Example #5
    def evaluate(self, data, fn_inverse=None, fn_plot=None):
        try:
            X, y = data
            y_hat = self.predict(X)
        except ValueError:
            X, X_ex, y = data
            y_hat = self.predict([X, X_ex])

        if fn_inverse is not None:
            y = fn_inverse(y)
            y_hat = fn_inverse(y_hat)

        y = np.float32(y)
        y_hat = np.float32(y_hat)

        if fn_plot is not None:
            fn_plot([y, y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results
Example #6
 def _eval(self, y, y_hat):
     results = []
     for m in self.model.metrics:
         if isinstance(m, str):
             results.append(K.eval(K.mean(get(m)(y, y_hat))))
         else:
             results.append(K.eval(K.mean(m(y, y_hat))))
     return results
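The loop in _eval above is the pattern shared by the evaluate() snippets in the other examples: string identifiers are resolved to functions with keras.metrics.get and the resulting tensor is reduced to a scalar. A minimal standalone sketch of that pattern, assuming a Keras 2.x-style setup in which metrics.get('mae') returns a plain function:

import numpy as np
from keras import backend as K
from keras.metrics import get

y = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
y_hat = np.array([[1.5], [2.0], [2.5]], dtype=np.float32)

metric_fn = get('mae')                       # resolve the string identifier
score = K.eval(K.mean(metric_fn(y, y_hat)))  # reduce to a single scalar
print(score)                                 # ~0.33 for this toy data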
Example #7
 @classmethod
 def from_config(cls, config):
     from keras.metrics import get  # pylint: disable=g-import-not-at-top
     # Note that while MeanMetricWrapper itself isn't public, objects of this
     # class may be created and added to the model by calling model.compile.
     fn = config.pop('fn', None)
     if cls is MeanMetricWrapper:
         return cls(get(fn), **config)
     return super(MeanMetricWrapper, cls).from_config(config)
Example #8
def get_metrics(identifier):
    '''Resolve a metric identifier to a callable.

    Custom metrics registered in `metrics_collection` take precedence;
    unknown strings fall back to `keras.metrics.get`; callables are
    returned unchanged.
    '''
    if isinstance(identifier, str):
        metric = metrics_collection.get(identifier)
        if metric is not None:
            return metric
        return get(identifier)
    elif callable(identifier):
        return identifier
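A hedged usage sketch for get_metrics; here metrics_collection is assumed to be a module-level dict of custom metric callables (its real contents are not shown in the example):

from keras import backend as K

def custom_rmse(y_true, y_pred):  # hypothetical custom metric
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

metrics_collection = {'rmse': custom_rmse}

get_metrics('rmse')                 # found in metrics_collection -> custom_rmse
get_metrics('mean_absolute_error')  # unknown name -> falls back to keras.metrics.get
get_metrics(custom_rmse)            # callables are returned unchanged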
Example #9
def get_metric_name(name):
    """
    Gives the canonical Keras name for a metric.

    Parameters
    ----------
    name : str
        original name of the metric

    Returns
    -------
    str
        'acc' for the accuracy aliases, the resolved function name when
        Keras knows the identifier, otherwise `name` unchanged.
    """
    if name in ('acc', 'accuracy'):
        return 'acc'
    try:
        metric_fn = metrics.get(name)
        return metric_fn.__name__
    except Exception:
        # fall back to the raw name if Keras cannot resolve it
        pass
    return name
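A brief usage sketch, assuming a Keras version in which metrics.get resolves aliases to plain functions exposing __name__:

get_metric_name('acc')            # 'acc' (special-cased)
get_metric_name('mse')            # 'mean_squared_error' under Keras 2.x
get_metric_name('my_custom_one')  # not resolvable -> returned unchanged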
Example #11
    def evaluate(self, inputs, fn_inverse=None, fn_plot=None):
        try:
            X, y_exog, y = inputs
            y_hat = self.model.predict([X, y_exog])
        except ValueError:
            X, y = inputs
            y_hat = self.model.predict(X)
        y_hat = np.asarray(y_hat, dtype=y.dtype)

        if fn_inverse is not None:
            y_hat = fn_inverse(y_hat)
            y = fn_inverse(y)

        if fn_plot is not None:
            fn_plot([y, y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results
Example #12
    def compile_tfrecord(self, optimizer, loss, y, metrics=None, y_val=None):
        """Configures the model for training.

    # Arguments
        optimizer: str (name of optimizer) or optimizer object.
          See [optimizers](/optimizers).
        loss: str (name of objective function) or objective function.
          See [losses](/losses).
          If the model has multiple outputs, you can use a different loss
          on each output by passing a dictionary or a list of losses.
          The loss value that will be minimized by the model
          will then be the sum of all individual losses.
        metrics: list of metrics to be evaluated by the model
          during training and testing.
          Typically you will use `metrics=['accuracy']`.
          To specify different metrics for different outputs of a
          multi-output model, you could also pass a dictionary,
          such as `metrics={'output_a': 'accuracy'}`.
        y: target tensor (or list of tensors) for the model outputs.
        y_val: target tensor (or list of tensors) for the validation
          outputs; required when validation inputs are used.

    # Raises
        ValueError: In case of invalid arguments for
            `optimizer`, `loss` or `metrics`.
    """
        loss = loss or {}
        self.optimizer = optimizers.get(optimizer)
        self.loss = loss
        self.sample_weight_mode = None
        self.loss_weights = None
        self.y_val = y_val

        do_validation = bool(len(self.val_inputs) > 0)
        if do_validation and y_val is None:
            raise ValueError('When you use validation inputs, '
                             'you should provide y_val.')

        # Prepare loss functions.
        if isinstance(loss, dict):
            for name in loss:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in loss '
                                     'dictionary: "' + name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            loss_functions = []
            for name in self.output_names:
                if name not in loss:
                    warnings.warn('Output "' + name +
                                  '" missing from loss dictionary. '
                                  'We assume this was done on purpose, '
                                  'and we will not be expecting '
                                  'any data to be passed to "' + name +
                                  '" during training.',
                                  stacklevel=2)
                loss_functions.append(losses.get(loss.get(name)))
        elif isinstance(loss, list):
            if len(loss) != len(self.outputs):
                raise ValueError('When passing a list as loss, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss=' + str(loss))
            loss_functions = [losses.get(l) for l in loss]
        else:
            loss_function = losses.get(loss)
            loss_functions = [loss_function for _ in range(len(self.outputs))]
        self.loss_functions = loss_functions

        # Prepare training targets of model.
        if isinstance(y, (list, tuple)):
            y = list(y)  # Tensor or list of tensors.
        else:
            y = [y]
        self.targets = []
        for i in range(len(self.outputs)):
            target = y[i]
            self.targets.append(target)

        # Prepare validation targets of model.
        if isinstance(y_val, (list, tuple)):
            y_val = list(y_val)  # Tensor or list of tensors.
        else:
            y_val = [y_val]
        self.y_val = y_val
        self.val_targets = []
        for i in range(len(self.val_outputs)):
            val_target = y_val[i]
            self.val_targets.append(val_target)

        # Prepare metrics.
        self.metrics = metrics
        self.metrics_names = ['loss']
        self.metrics_tensors = []
        self.val_metrics_names = ['loss']
        self.val_metrics_tensors = []

        # Compute total training loss.
        total_loss = None
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            loss_function = loss_functions[i]
            val_output_loss = K.mean(loss_function(y_true, y_pred))
            if len(self.outputs) > 1:
                self.metrics_tensors.append(val_output_loss)
                self.metrics_names.append(self.output_names[i] + '_loss')
            if total_loss is None:
                total_loss = val_output_loss
            else:
                total_loss += val_output_loss
        if total_loss is None:
            if not self.losses:
                raise RuntimeError('The model cannot be compiled '
                                   'because it has no loss to optimize.')
            else:
                total_loss = 0.

        # Compute total validation loss.
        val_total_loss = None
        for i in range(len(self.val_outputs)):
            y_true = self.val_targets[i]
            y_pred = self.val_outputs[i]
            loss_function = loss_functions[i]
            val_output_loss = K.mean(loss_function(y_true, y_pred))
            if len(self.outputs) > 1:
                self.val_metrics_tensors.append(val_output_loss)
                self.val_metrics_names.append(self.output_names[i] +
                                              '_val_loss')
            if val_total_loss is None:
                val_total_loss = val_output_loss
            else:
                val_total_loss += val_output_loss
        if val_total_loss is None:
            if not self.losses and do_validation:
                raise RuntimeError('The model cannot be compiled '
                                   'because it has no loss to optimize.')
            else:
                val_total_loss = 0.

        # Add regularization penalties
        # and other layer-specific losses.
        for loss_tensor in self.losses:
            total_loss += loss_tensor
            val_total_loss += loss_tensor

        # List of same size as output_names.
        # contains tuples (metrics for output, names of metrics).
        nested_metrics = _collect_metrics(metrics, self.output_names)

        def append_metric(layer_num, metric_name, metric_tensor):
            """Helper function used in loop below."""
            if len(self.output_names) > 1:
                metric_name = self.output_layers[
                    layer_num].name + '_' + metric_name
            self.metrics_names.append(metric_name)
            self.metrics_tensors.append(metric_tensor)

        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            output_metrics = nested_metrics[i]
            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy
                    # (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    acc_fn = None
                    if (output_shape[-1] == 1 or self.loss_functions[i]
                            == losses.binary_crossentropy):
                        # case: binary accuracy
                        acc_fn = metrics_module.binary_accuracy
                    elif self.loss_functions[
                            i] == losses.sparse_categorical_crossentropy:
                        # case: categorical accuracy with sparse targets
                        acc_fn = metrics_module.sparse_categorical_accuracy
                    else:
                        acc_fn = metrics_module.categorical_accuracy

                    append_metric(i, 'acc', K.mean(acc_fn(y_true, y_pred)))
                else:
                    metric_fn = metrics_module.get(metric)
                    metric_result = metric_fn(y_true, y_pred)
                    metric_result = {metric_fn.__name__: metric_result}
                    for name, tensor in six.iteritems(metric_result):
                        append_metric(i, name, tensor)

        def append_val_metric(layer_num, metric_name, metric_tensor):
            """Helper function used in loop below."""
            if len(self.output_names) > 1:
                metric_name = self.output_layers[
                    layer_num].name + '_val_' + metric_name
            self.val_metrics_names.append(metric_name)
            self.val_metrics_tensors.append(metric_tensor)

        for i in range(len(self.val_outputs)):
            y_true = self.val_targets[i]
            y_pred = self.val_outputs[i]
            output_metrics = nested_metrics[i]
            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy
                    # (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    acc_fn = None
                    if (output_shape[-1] == 1 or self.loss_functions[i]
                            == losses.binary_crossentropy):
                        # case: binary accuracy
                        acc_fn = metrics_module.binary_accuracy
                    elif self.loss_functions[
                            i] == losses.sparse_categorical_crossentropy:
                        # case: categorical accuracy with sparse targets
                        acc_fn = metrics_module.sparse_categorical_accuracy
                    else:
                        acc_fn = metrics_module.categorical_accuracy

                    append_val_metric(i, 'acc', K.mean(acc_fn(y_true, y_pred)))
                else:
                    metric_fn = metrics_module.get(metric)
                    metric_result = metric_fn(y_true, y_pred)
                    metric_result = {metric_fn.__name__: metric_result}
                    for name, tensor in six.iteritems(metric_result):
                        append_val_metric(i, name, tensor)

        # Prepare gradient updates and state updates.
        self.total_loss = total_loss
        self.val_total_loss = val_total_loss

        # Functions for train, test and predict will
        # be compiled lazily when required.
        # This saves time when the user is not using all functions.
        self.train_function = None
        self.val_function = None
        self.test_function = None
        self.predict_function = None

        # Collect trainable weights and sort them deterministically.
        trainable_weights = self.trainable_weights
        # Sort weights by name.
        if trainable_weights:
            trainable_weights.sort(key=lambda x: x.name)
        self._collected_trainable_weights = trainable_weights
Example #13
    def compile(self,
                optimizer,
                loss,
                metrics=None,
                loss_weights=None,
                sample_weight_mode=None,
                **kwargs):
        #super(sModel, self).compile(optimizer, loss, metrics, loss_weights,
        #                            sample_weights_mode, **kwargs)
        metrics = metrics or []  # avoid a mutable default argument
        self.optimizer = optimizers.get(optimizer)
        self.sample_weight_mode = sample_weight_mode
        self.loss = loss
        self.loss_weights = loss_weights

        # prepare loss weights
        if loss_weights is None:
            loss_weights_list = [1. for _ in range(len(self.outputs))]
        elif isinstance(loss_weights, dict):
            for name in loss_weights:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in loss_weights '
                                     'dictionary: "' + name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            loss_weights_list = []
            for name in self.output_names:
                loss_weights_list.append(loss_weights.get(name, 1.))
        elif isinstance(loss_weights, list):
            if len(loss_weights) != len(self.outputs):
                raise ValueError('When passing a list as loss_weights, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss_weights=' +
                                 str(loss_weights))
            loss_weights_list = loss_weights
        else:
            raise TypeError('Could not interpret loss_weights argument: ' +
                            str(loss_weights) + ' - expected a list or a dict.')

        # prepare loss functions
        if isinstance(loss, dict):
            for name in loss:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in loss '
                                     'dictionary: "' + name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            loss_functions = []
            for name in self.output_names:
                if name not in loss:
                    raise ValueError('Output "' + name +
                                     '" missing from loss dictionary.')
                loss_functions.append(objectives.get(loss[name]))
        elif isinstance(loss, list):
            if len(loss) != len(self.outputs):
                raise ValueError('When passing a list as loss, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss=' + str(loss))
            loss_functions = [objectives.get(l) for l in loss]
        else:
            loss_function = objectives.get(loss)
            loss_functions = [loss_function for _ in range(len(self.outputs))]
        self.loss_functions = loss_functions
        weighted_losses = [weighted_objective(fn) for fn in loss_functions]

        # prepare output masks
        masks = self.compute_mask(self.inputs, mask=None)
        if masks is None:
            masks = [None for _ in self.outputs]
        if not isinstance(masks, list):
            masks = [masks]

        # prepare sample weights
        if isinstance(sample_weight_mode, dict):
            for name in sample_weight_mode:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in '
                                     'sample_weight_mode dictionary: "' +
                                     name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            sample_weights = []
            sample_weight_modes = []
            for name in self.output_names:
                if name not in sample_weight_mode:
                    raise ValueError('Output "' + name +
                                     '" missing from sample_weight_modes '
                                     'dictionary')
                if sample_weight_mode.get(name) == 'temporal':
                    weight = K.placeholder(ndim=2,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append('temporal')
                else:
                    weight = K.placeholder(ndim=1,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append(None)
                sample_weights.append(weight)
        elif isinstance(sample_weight_mode, list):
            if len(sample_weight_mode) != len(self.outputs):
                raise ValueError('When passing a list as sample_weight_mode, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed '
                                 'sample_weight_mode=' +
                                 str(sample_weight_mode))
            sample_weights = []
            sample_weight_modes = []
            for mode, name in zip(sample_weight_mode, self.output_names):
                if mode == 'temporal':
                    weight = K.placeholder(ndim=2,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append('temporal')
                else:
                    weight = K.placeholder(ndim=1,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append(None)
                sample_weights.append(weight)
        else:
            if sample_weight_mode == 'temporal':
                sample_weights = [
                    K.placeholder(ndim=2, name=name + '_sample_weights')
                    for name in self.output_names
                ]
                sample_weight_modes = [
                    'temporal' for name in self.output_names
                ]
            else:
                sample_weights = [
                    K.placeholder(ndim=1, name=name + '_sample_weights')
                    for name in self.output_names
                ]
                sample_weight_modes = [None for name in self.output_names]
        self.sample_weight_modes = sample_weight_modes

        # prepare targets of model
        self.targets = []
        for i in range(len(self.outputs)):
            shape = self.internal_output_shapes[i]
            name = self.output_names[i]
            self.targets.append(
                K.placeholder(ndim=len(shape),
                              name=name + '_target',
                              sparse=K.is_sparse(self.outputs[i]),
                              dtype=K.dtype(self.outputs[i])))

        # prepare metrics
        self.metrics = metrics
        self.metrics_names = ['loss']
        self.metrics_tensors = []

        # compute total loss
        total_loss = None
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            weighted_loss = weighted_losses[i]
            sample_weight = sample_weights[i]
            mask = masks[i]
            loss_weight = loss_weights_list[i]
            output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
            if len(self.outputs) > 1:
                self.metrics_tensors.append(output_loss)
                self.metrics_names.append(self.output_names[i] + '_loss')
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss

        # add regularization penalties
        # and other layer-specific losses
        for loss_tensor in self.losses:
            total_loss += loss_tensor

        # list of same size as output_names.
        # contains tuples (metrics for output, names of metrics)
        nested_metrics = collect_metrics(metrics, self.output_names)

        def append_metric(layer_num, metric_name, metric_tensor):
            """Helper function, used in loop below"""
            if len(self.output_names) > 1:
                metric_name = self.output_layers[
                    layer_num].name + '_' + metric_name

            self.metrics_names.append(metric_name)
            self.metrics_tensors.append(metric_tensor)

        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            output_metrics = nested_metrics[i]

            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy
                    # (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    acc_fn = None
                    if output_shape[-1] == 1 or self.loss_functions[
                            i] == objectives.binary_crossentropy:
                        # case: binary accuracy
                        acc_fn = metrics_module.binary_accuracy
                    elif self.loss_functions[
                            i] == objectives.sparse_categorical_crossentropy:
                        # case: categorical accuracy with sparse targets
                        acc_fn = metrics_module.sparse_categorical_accuracy
                    else:
                        acc_fn = metrics_module.categorical_accuracy

                    append_metric(i, 'acc', acc_fn(y_true, y_pred))
                else:
                    metric_fn = metrics_module.get(metric)
                    metric_result = metric_fn(y_true, y_pred)

                    if not isinstance(metric_result, dict):
                        metric_result = {metric_fn.__name__: metric_result}

                    for name, tensor in six.iteritems(metric_result):
                        append_metric(i, name, tensor)

        # prepare gradient updates and state updates
        self.total_loss = total_loss
        self.sample_weights = sample_weights

        # functions for train, test and predict will
        # be compiled lazily when required.
        # This saves time when the user is not using all functions.
        self._function_kwargs = kwargs

        self.train_function = None
        self.test_function = None
        self.predict_function = None

        # collect trainable weights and sort them deterministically.
        trainable_weights = self.trainable_weights
        # Sort weights by name
        if trainable_weights:
            if K.backend() == 'theano':
                trainable_weights.sort(
                    key=lambda x: x.name if x.name else x.auto_name)
            else:
                trainable_weights.sort(key=lambda x: x.name)
        self._collected_trainable_weights = trainable_weights
Example #14
def evaluate_keras_metric(y_true, y_pred, metric):
    objective_function = metrics.get(metric)
    objective = objective_function(y_true, y_pred)
    return K.eval(objective)
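A small usage sketch for evaluate_keras_metric, assuming the surrounding module provides `from keras import metrics, backend as K`:

import numpy as np

y_true = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
y_pred = np.array([[1.5], [2.0], [2.5]], dtype=np.float32)

# per-sample mean absolute errors, evaluated eagerly with K.eval
print(evaluate_keras_metric(y_true, y_pred, 'mae'))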
Example #15
def compile_tfrecord(train_model,
                     optimizer,
                     loss,
                     out_tensor_lst,
                     metrics=None,
                     loss_weights=None):
    metrics = metrics or []  # avoid a mutable default argument
    train_model.build(train_model)
    # train_model.build()

    train_model.optimizer = optimizers.get(optimizer)
    train_model.loss = loss
    train_model.loss_weights = loss_weights

    # prepare loss weights
    if loss_weights is None:
        loss_weights_list = [1. for _ in range(len(train_model.outputs))]
    elif isinstance(loss_weights, dict):
        for name in loss_weights:
            if name not in train_model.output_names:
                raise ValueError('Unknown entry in loss_weights '
                                 'dictionary: "' + name + '". '
                                 'Only expected the following keys: ' +
                                 str(train_model.output_names))
        loss_weights_list = []
        for name in train_model.output_names:
            loss_weights_list.append(loss_weights.get(name, 1.))
    elif isinstance(loss_weights, list):
        if len(loss_weights) != len(train_model.outputs):
            raise ValueError('When passing a list as loss_weights, '
                             'it should have one entry per model outputs. '
                             'The model has ' + str(len(train_model.outputs)) +
                             ' outputs, but you passed loss_weights=' +
                             str(loss_weights))
        loss_weights_list = loss_weights
    else:
        raise TypeError('Could not interpret loss_weights argument: ' +
                        str(loss_weights) + ' - expected a list or a dict.')

    # prepare loss functions
    if isinstance(loss, dict):
        for name in loss:
            if name not in train_model.output_names:
                raise ValueError('Unknown entry in loss '
                                 'dictionary: "' + name + '". '
                                 'Only expected the following keys: ' +
                                 str(train_model.output_names))
        loss_functions = []
        for name in train_model.output_names:
            if name not in loss:
                raise ValueError('Output "' + name +
                                 '" missing from loss dictionary.')
            loss_functions.append(objectives.get(loss[name]))
    elif isinstance(loss, list):
        if len(loss) != len(train_model.outputs):
            raise ValueError('When passing a list as loss, '
                             'it should have one entry per model outputs. '
                             'The model has ' + str(len(train_model.outputs)) +
                             ' outputs, but you passed loss=' + str(loss))
        loss_functions = [objectives.get(l) for l in loss]
    else:
        loss_function = objectives.get(loss)
        loss_functions = [
            loss_function for _ in range(len(train_model.outputs))
        ]
    train_model.loss_functions = loss_functions
    weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]

    # prepare metrics
    train_model.metrics = metrics
    train_model.metrics_names = ['loss']
    train_model.metrics_tensors = []

    # compute total loss
    total_loss = None
    for i in range(len(train_model.outputs)):
        y_true = out_tensor_lst[i]
        y_pred = train_model.outputs[i]
        _loss = loss_functions[i]
        # _loss = weighted_losses[i]
        loss_weight = loss_weights_list[i]
        # output_loss = _loss(y_true, y_pred, None, None)
        output_loss = K.mean(_loss(y_true, y_pred))
        if len(train_model.outputs) > 1:
            train_model.metrics_tensors.append(output_loss)
            train_model.metrics_names.append(train_model.output_names[i] +
                                             '_loss')
        if total_loss is None:
            total_loss = loss_weight * output_loss
        else:
            total_loss += loss_weight * output_loss

    # add regularization penalties
    # and other layer-specific losses
    for loss_tensor in train_model.losses:
        total_loss += loss_tensor

    # list of same size as output_names.
    # contains tuples (metrics for output, names of metrics)
    nested_metrics = _collect_metrics(metrics, train_model.output_names)

    def append_metric(layer_num, metric_name, metric_tensor):
        """Helper function, used in loop below"""
        if len(train_model.output_names) > 1:
            metric_name = train_model.output_layers[
                layer_num].name + '_' + metric_name

        train_model.metrics_names.append(metric_name)
        train_model.metrics_tensors.append(metric_tensor)

    for i in range(len(train_model.outputs)):
        y_true = out_tensor_lst[i]
        y_pred = train_model.outputs[i]
        output_metrics = nested_metrics[i]

        for metric in output_metrics:
            if metric == 'accuracy' or metric == 'acc':
                # custom handling of accuracy
                # (because of class mode duality)
                output_shape = train_model.internal_output_shapes[i]
                acc_fn = None
                if output_shape[-1] == 1 or train_model.loss_functions[
                        i] == objectives.binary_crossentropy:
                    # case: binary accuracy
                    acc_fn = metrics_module.binary_accuracy
                elif train_model.loss_functions[
                        i] == objectives.sparse_categorical_crossentropy:
                    # case: categorical accuracy with sparse targets
                    acc_fn = metrics_module.sparse_categorical_accuracy
                else:
                    acc_fn = metrics_module.categorical_accuracy

                append_metric(i, 'acc', acc_fn(y_true, y_pred))
            else:
                metric_fn = metrics_module.get(metric)
                metric_result = metric_fn(y_true, y_pred)

                if not isinstance(metric_result, dict):
                    metric_result = {metric_fn.__name__: metric_result}

                for name, tensor in six.iteritems(metric_result):
                    append_metric(i, name, tensor)

    # prepare gradient updates and state updates
    train_model.optimizer = optimizers.get(optimizer)
    train_model.total_loss = total_loss

    train_model.train_function = None
    train_model.test_function = None
    train_model.predict_function = None

    # collect trainable weights and sort them deterministically.
    trainable_weights = train_model.trainable_weights
    # Sort weights by name
    trainable_weights.sort(key=lambda x: x.name)
    train_model._collected_trainable_weights = trainable_weights
Example #17
def test_invalid_get():

    with pytest.raises(ValueError):
        metrics.get(5)
Example #19
    def _get_metric_object(self, metric, y_t, y_p):
        """Converts user-supplied metric to a `Metric` object.

        Args:
          metric: A string, function, or `Metric` object.
          y_t: Sample of label.
          y_p: Sample of output.

        Returns:
          A `Metric` object.
        """
        if metric is None:
            return None  # Ok to have no metric for an output.

        # Convenience feature for selecting b/t binary, categorical,
        # and sparse categorical.
        if str(metric).lower() not in [
                "accuracy", "acc", "crossentropy", "ce"
        ]:
            metric_obj = metrics_mod.get(metric)
        else:
            y_t_rank = len(y_t.shape.as_list())
            y_p_rank = len(y_p.shape.as_list())
            y_t_last_dim = y_t.shape.as_list()[-1]
            y_p_last_dim = y_p.shape.as_list()[-1]

            is_binary = y_p_last_dim == 1
            is_sparse_categorical = (y_t_rank < y_p_rank
                                     or y_t_last_dim == 1 and y_p_last_dim > 1)

            if str(metric).lower() in ["accuracy", "acc"]:
                if is_binary:
                    metric_obj = metrics_mod.binary_accuracy
                elif is_sparse_categorical:
                    metric_obj = metrics_mod.sparse_categorical_accuracy
                else:
                    metric_obj = metrics_mod.categorical_accuracy
            else:
                if is_binary:
                    metric_obj = metrics_mod.binary_crossentropy
                elif is_sparse_categorical:
                    metric_obj = metrics_mod.sparse_categorical_crossentropy
                else:
                    metric_obj = metrics_mod.categorical_crossentropy

        if isinstance(metric_obj, losses_mod.Loss):
            metric_obj._allow_sum_over_batch_size = True

        if not isinstance(metric_obj, metrics_mod.Metric):
            if isinstance(metric, str):
                metric_name = metric
            else:
                metric_name = get_custom_object_name(metric)
                if metric_name is None:
                    raise ValueError(
                        f"Metric should be a callable, received: {metric}")

            metric_obj = metrics_mod.MeanMetricWrapper(metric_obj,
                                                       name=metric_name)

        return metric_obj
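The accuracy/crossentropy dispatch above depends only on the shapes of the label and prediction samples. A hedged illustration, assuming TensorFlow is available (the tensors are shape placeholders, not real data):

import tensorflow as tf

y_t = tf.zeros([8, 1])   # integer class ids: rank 2, last dim 1
y_p = tf.zeros([8, 10])  # class probabilities: last dim 10

# y_t_rank == y_p_rank, y_t_last_dim == 1, y_p_last_dim > 1
# -> is_sparse_categorical is True, so 'accuracy' resolves to
#    metrics_mod.sparse_categorical_accuracy and 'crossentropy' to
#    metrics_mod.sparse_categorical_crossentropy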