def compile_tfrecord(self, optimizer, loss, y, metrics=None, y_val=None):
        """Configures the model for training.

    # Arguments
        optimizer: str (name of optimizer) or optimizer object.
          See [optimizers](/optimizers).
        loss: str (name of objective function) or objective function.
          See [losses](/losses).
          If the model has multiple outputs, you can use a different loss
          on each output by passing a dictionary or a list of losses.
          The loss value that will be minimized by the model
          will then be the sum of all individual losses.
        metrics: list of metrics to be evaluated by the model
          during training and testing.
          Typically you will use `metrics=['accuracy']`.
          To specify different metrics for different outputs of a
          multi-output model, you could also pass a dictionary,
          such as `metrics={'output_a': 'accuracy'}`.

    # Raises
        ValueError: In case of invalid arguments for
            `optimizer`, `loss`, `metrics` or `sample_weight_mode`.
    """
        loss = loss or {}
        self.optimizer = optimizers.get(optimizer)
        self.loss = loss
        self.sample_weight_mode = None
        self.loss_weights = None

        do_validation = bool(self.val_inputs)
        if do_validation and y_val is None:
            raise ValueError('When the model has validation inputs, '
                             'you must provide y_val.')

        # Prepare loss functions.
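        # `loss` may be a dict mapping output names to losses, a list with
        # one entry per output, or a single loss shared by all outputs.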
        if isinstance(loss, dict):
            for name in loss:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in loss '
                                     'dictionary: "' + name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            loss_functions = []
            for name in self.output_names:
                if name not in loss:
                    warnings.warn('Output "' + name +
                                  '" missing from loss dictionary. '
                                  'We assume this was done on purpose, '
                                  'and we will not be expecting '
                                  'any data to be passed to "' + name +
                                  '" during training.',
                                  stacklevel=2)
                loss_functions.append(losses.get(loss.get(name)))
        elif isinstance(loss, list):
            if len(loss) != len(self.outputs):
                raise ValueError('When passing a list as loss, '
                                 'it should have one entry per model output. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss=' + str(loss))
            loss_functions = [losses.get(l) for l in loss]
        else:
            loss_function = losses.get(loss)
            loss_functions = [loss_function for _ in range(len(self.outputs))]
        self.loss_functions = loss_functions

        # Prepare training targets of model.
        if isinstance(y, (list, tuple)):
            y = list(y)  # Tensor or list of tensors.
        else:
            y = [y]
        self.targets = [y[i] for i in range(len(self.outputs))]

        # Prepare validation targets of model.
        if isinstance(y_val, (list, tuple)):
            y_val = list(y_val)  # Tensor or list of tensors.
        else:
            y_val = [y_val]
        self.y_val = y_val
        self.val_targets = [y_val[i] for i in range(len(self.val_outputs))]

        # Prepare metrics.
        self.metrics = metrics
        self.metrics_names = ['loss']
        self.metrics_tensors = []
        self.val_metrics_names = ['loss']
        self.val_metrics_tensors = []

        # Compute total training loss.
        total_loss = None
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            loss_function = loss_functions[i]
            output_loss = K.mean(loss_function(y_true, y_pred))
            if len(self.outputs) > 1:
                self.metrics_tensors.append(output_loss)
                self.metrics_names.append(self.output_names[i] + '_loss')
            if total_loss is None:
                total_loss = output_loss
            else:
                total_loss += output_loss
        if total_loss is None:
            if not self.losses:
                raise RuntimeError('The model cannot be compiled '
                                   'because it has no loss to optimize.')
            else:
                total_loss = 0.

        # Compute total validation loss.
        val_total_loss = None
        for i in range(len(self.val_outputs)):
            y_true = self.val_targets[i]
            y_pred = self.val_outputs[i]
            loss_function = loss_functions[i]
            val_output_loss = K.mean(loss_function(y_true, y_pred))
            if len(self.val_outputs) > 1:
                self.val_metrics_tensors.append(val_output_loss)
                self.val_metrics_names.append(self.output_names[i] +
                                              '_val_loss')
            if val_total_loss is None:
                val_total_loss = val_output_loss
            else:
                val_total_loss += val_output_loss
        if val_total_loss is None:
            if not self.losses and do_validation:
                raise RuntimeError('The model cannot be compiled '
                                   'because it has no loss to optimize.')
            else:
                val_total_loss = 0.

        # Add regularization penalties
        # and other layer-specific losses.
        for loss_tensor in self.losses:
            total_loss += loss_tensor
            val_total_loss += loss_tensor

        # List of the same size as output_names,
        # containing the list of metric names for each output.
        nested_metrics = _collect_metrics(metrics, self.output_names)
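        # e.g. metrics=['accuracy'] with two outputs yields
        # nested_metrics == [['accuracy'], ['accuracy']].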

        def append_metric(layer_num, metric_name, metric_tensor):
            """Helper function used in loop below."""
            if len(self.output_names) > 1:
                metric_name = self.output_layers[
                    layer_num].name + '_' + metric_name
            self.metrics_names.append(metric_name)
            self.metrics_tensors.append(metric_tensor)

        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            output_metrics = nested_metrics[i]
            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy
                    # (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    acc_fn = None
                    if (output_shape[-1] == 1 or self.loss_functions[i] ==
                            losses.binary_crossentropy):
                        # case: binary accuracy
                        acc_fn = metrics_module.binary_accuracy
                    elif (self.loss_functions[i] ==
                          losses.sparse_categorical_crossentropy):
                        # case: categorical accuracy with sparse targets
                        acc_fn = metrics_module.sparse_categorical_accuracy
                    else:
                        acc_fn = metrics_module.categorical_accuracy

                    append_metric(i, 'acc', K.mean(acc_fn(y_true, y_pred)))
                else:
                    metric_fn = metrics_module.get(metric)
                    metric_result = metric_fn(y_true, y_pred)
                    if not isinstance(metric_result, dict):
                        metric_result = {metric_fn.__name__: metric_result}
                    for name, tensor in six.iteritems(metric_result):
                        append_metric(i, name, tensor)

        def append_val_metric(layer_num, metric_name, metric_tensor):
            """Helper function used in loop below."""
            if len(self.output_names) > 1:
                metric_name = self.output_layers[
                    layer_num].name + '_val_' + metric_name
            self.val_metrics_names.append(metric_name)
            self.val_metrics_tensors.append(metric_tensor)

        for i in range(len(self.val_outputs)):
            y_true = self.val_targets[i]
            y_pred = self.val_outputs[i]
            output_metrics = nested_metrics[i]
            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy
                    # (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    acc_fn = None
                    if (output_shape[-1] == 1 or self.loss_functions[i] ==
                            losses.binary_crossentropy):
                        # case: binary accuracy
                        acc_fn = metrics_module.binary_accuracy
                    elif (self.loss_functions[i] ==
                          losses.sparse_categorical_crossentropy):
                        # case: categorical accuracy with sparse targets
                        acc_fn = metrics_module.sparse_categorical_accuracy
                    else:
                        acc_fn = metrics_module.categorical_accuracy

                    append_val_metric(i, 'acc', K.mean(acc_fn(y_true, y_pred)))
                else:
                    metric_fn = metrics_module.get(metric)
                    metric_result = metric_fn(y_true, y_pred)
                    if not isinstance(metric_result, dict):
                        metric_result = {metric_fn.__name__: metric_result}
                    for name, tensor in six.iteritems(metric_result):
                        append_val_metric(i, name, tensor)

        # Prepare gradient updates and state updates.
        self.total_loss = total_loss
        self.val_total_loss = val_total_loss

        # Functions for train, test and predict will
        # be compiled lazily when required.
        # This saves time when the user is not using all functions.
        self.train_function = None
        self.val_function = None
        self.test_function = None
        self.predict_function = None

        # Collect trainable weights and sort them deterministically.
        trainable_weights = self.trainable_weights
        # Sort weights by name.
        if trainable_weights:
            trainable_weights.sort(key=lambda x: x.name)
        self._collected_trainable_weights = trainable_weights
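
Usage sketch (illustrative only): the method above assumes a hypothetical
model class whose `inputs`, `val_inputs` and `outputs` are tensors fed from
a TFRecord input pipeline; every variable name below is made up.

model.compile_tfrecord(optimizer='adam',
                       loss='categorical_crossentropy',
                       y=y_batch,            # target tensor from the pipeline
                       metrics=['accuracy'],
                       y_val=y_val_batch)    # required when val_inputs exist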
Code example #2
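
# Assumed imports for this excerpt (a sketch only: `_collect_metrics` and
# `_weighted_masked_objective` are private helpers from older Keras releases,
# and `objectives` is the pre-Keras-2 alias of the `losses` module):
# import six
# from keras import backend as K
# from keras import metrics as metrics_module
# from keras import objectives, optimizers
# from keras.engine.training import (_collect_metrics,
#                                    _weighted_masked_objective)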
def compile_tfrecord(train_model,
                     optimizer,
                     loss,
                     out_tensor_lst,
                     metrics=None,
                     loss_weights=None):
    train_model.build(train_model)
    # train_model.build()

    # Guard against sharing a mutable default list for `metrics`.
    metrics = metrics or []

    train_model.optimizer = optimizers.get(optimizer)
    train_model.loss = loss
    train_model.loss_weights = loss_weights

    # prepare loss weights
    if loss_weights is None:
        loss_weights_list = [1. for _ in range(len(train_model.outputs))]
    elif isinstance(loss_weights, dict):
        for name in loss_weights:
            if name not in train_model.output_names:
                raise ValueError('Unknown entry in loss_weights '
                                 'dictionary: "' + name + '". '
                                 'Only expected the following keys: ' +
                                 str(train_model.output_names))
        loss_weights_list = []
        for name in train_model.output_names:
            loss_weights_list.append(loss_weights.get(name, 1.))
    elif isinstance(loss_weights, list):
        if len(loss_weights) != len(train_model.outputs):
            raise ValueError('When passing a list as loss_weights, '
                             'it should have one entry per model output. '
                             'The model has ' + str(len(train_model.outputs)) +
                             ' outputs, but you passed loss_weights=' +
                             str(loss_weights))
        loss_weights_list = loss_weights
    else:
        raise TypeError('Could not interpret loss_weights argument: ' +
                        str(loss_weights) + ' - expected a list or dict.')

    # prepare loss functions
    if isinstance(loss, dict):
        for name in loss:
            if name not in train_model.output_names:
                raise ValueError('Unknown entry in loss '
                                 'dictionary: "' + name + '". '
                                 'Only expected the following keys: ' +
                                 str(train_model.output_names))
        loss_functions = []
        for name in train_model.output_names:
            if name not in loss:
                raise ValueError('Output "' + name +
                                 '" missing from loss dictionary.')
            loss_functions.append(objectives.get(loss[name]))
    elif isinstance(loss, list):
        if len(loss) != len(train_model.outputs):
            raise ValueError('When passing a list as loss, '
                             'it should have one entry per model output. '
                             'The model has ' + str(len(train_model.outputs)) +
                             ' outputs, but you passed loss=' + str(loss))
        loss_functions = [objectives.get(l) for l in loss]
    else:
        loss_function = objectives.get(loss)
        loss_functions = [
            loss_function for _ in range(len(train_model.outputs))
        ]
    train_model.loss_functions = loss_functions
    weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
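    # NOTE: `weighted_losses` is computed but unused below; the weighted
    # variant is left commented out in the total-loss loop.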

    # prepare metrics
    train_model.metrics = metrics
    train_model.metrics_names = ['loss']
    train_model.metrics_tensors = []

    # compute total loss
    total_loss = None
    for i in range(len(train_model.outputs)):
        y_true = out_tensor_lst[i]
        y_pred = train_model.outputs[i]
        _loss = loss_functions[i]
        # _loss = weighted_losses[i]
        loss_weight = loss_weights_list[i]
        # output_loss = _loss(y_true, y_pred, None, None)
        output_loss = K.mean(_loss(y_true, y_pred))
        if len(train_model.outputs) > 1:
            train_model.metrics_tensors.append(output_loss)
            train_model.metrics_names.append(train_model.output_names[i] +
                                             '_loss')
        if total_loss is None:
            total_loss = loss_weight * output_loss
        else:
            total_loss += loss_weight * output_loss

    # add regularization penalties
    # and other layer-specific losses
    for loss_tensor in train_model.losses:
        total_loss += loss_tensor

    # list of the same size as output_names,
    # containing the list of metric names for each output
    nested_metrics = _collect_metrics(metrics, train_model.output_names)

    def append_metric(layer_num, metric_name, metric_tensor):
        """Helper function, used in loop below"""
        if len(train_model.output_names) > 1:
            metric_name = train_model.output_layers[
                layer_num].name + '_' + metric_name

        train_model.metrics_names.append(metric_name)
        train_model.metrics_tensors.append(metric_tensor)

    for i in range(len(train_model.outputs)):
        y_true = out_tensor_lst[i]
        y_pred = train_model.outputs[i]
        output_metrics = nested_metrics[i]

        for metric in output_metrics:
            if metric == 'accuracy' or metric == 'acc':
                # custom handling of accuracy
                # (because of class mode duality)
                output_shape = train_model.internal_output_shapes[i]
                acc_fn = None
                if (output_shape[-1] == 1 or train_model.loss_functions[i] ==
                        objectives.binary_crossentropy):
                    # case: binary accuracy
                    acc_fn = metrics_module.binary_accuracy
                elif (train_model.loss_functions[i] ==
                      objectives.sparse_categorical_crossentropy):
                    # case: categorical accuracy with sparse targets
                    acc_fn = metrics_module.sparse_categorical_accuracy
                else:
                    acc_fn = metrics_module.categorical_accuracy

                append_metric(i, 'acc', K.mean(acc_fn(y_true, y_pred)))
            else:
                metric_fn = metrics_module.get(metric)
                metric_result = metric_fn(y_true, y_pred)

                if not isinstance(metric_result, dict):
                    metric_result = {metric_fn.__name__: metric_result}

                for name, tensor in six.iteritems(metric_result):
                    append_metric(i, name, tensor)

    # prepare gradient updates and state updates
    train_model.total_loss = total_loss

    train_model.train_function = None
    train_model.test_function = None
    train_model.predict_function = None

    # Collect trainable weights and sort them deterministically.
    trainable_weights = train_model.trainable_weights
    # Sort weights by name.
    trainable_weights.sort(key=lambda x: x.name)
    train_model._collected_trainable_weights = trainable_weights
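
Usage sketch (illustrative only): assuming `train_model` is a Keras Model
built on tensors parsed from TFRecords and `y_batch` is the matching label
tensor from the same pipeline, a call might look like:

compile_tfrecord(train_model,
                 optimizer='sgd',
                 loss='mse',
                 out_tensor_lst=[y_batch],   # one target tensor per output
                 metrics=['accuracy'])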