Example #1
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
# predict_batch is assumed to be a batching prediction helper from the
# surrounding codebase (e.g. alibi_detect.utils.prediction.predict_batch).


def hidden_output(
    X: np.ndarray,
    model: tf.keras.Model = None,
    layer: int = -1,
    input_shape: tuple = None,
    batch_size: int = int(1e10)) -> np.ndarray:
    """
    Return the hidden layer output of a model on a batch of instances.

    Parameters
    ----------
    X
        Batch of instances.
    model
        tf.keras.Model.
    layer
        Hidden layer of the model to use as output. The default of -1 refers to
        the last layer (e.g. the softmax layer).
    input_shape
        Optional input layer shape.
    batch_size
        Batch size used for the model predictions.

    Returns
    -------
    Model predictions using the specified hidden layer as output layer.
    """
    if input_shape and not model.inputs:
        # A subclassed model has no symbolic inputs until it is called,
        # so build it on a fresh Input of the given shape.
        inputs = Input(shape=input_shape)
        model.call(inputs)
    else:
        inputs = model.inputs
    # Truncate the model at the requested layer and predict in batches.
    hidden_model = Model(inputs=inputs, outputs=model.layers[layer].output)
    X_hidden = predict_batch(hidden_model, X, batch_size=batch_size)
    return X_hidden
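A quick usage sketch, assuming the predict_batch helper used above is importable; the toy model and data here are purely illustrative:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
X = np.random.rand(8, 4).astype(np.float32)

# layer=0 selects the output of the first Dense layer.
X_hidden = hidden_output(X, model=model, layer=0)
print(X_hidden.shape)  # (8, 16)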
Example #2
# Assumes: from tensorflow.keras.layers import Input
#          from tensorflow.keras.models import Model
def __init__(self,
             model: tf.keras.Model,
             layer: int = -1,
             input_shape: tuple = None) -> None:
    super().__init__()
    if input_shape and not model.inputs:
        # A subclassed model has no symbolic inputs until it is called,
        # so build it on a fresh Input of the given shape.
        inputs = Input(shape=input_shape)
        model.call(inputs)
    else:
        inputs = model.inputs
    # Expose the requested hidden layer as the wrapped model's output.
    self.model = Model(inputs=inputs, outputs=model.layers[layer].output)
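The snippet above is only a constructor; a minimal self-contained wrapper built around it could look as follows. The class name HiddenOutput and the call method are assumptions for illustration, not shown in the original:

import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model


class HiddenOutput(tf.keras.Model):  # class name is an assumption
    def __init__(self,
                 model: tf.keras.Model,
                 layer: int = -1,
                 input_shape: tuple = None) -> None:
        super().__init__()
        if input_shape and not model.inputs:
            inputs = Input(shape=input_shape)
            model.call(inputs)
        else:
            inputs = model.inputs
        self.model = Model(inputs=inputs, outputs=model.layers[layer].output)

    def call(self, x: tf.Tensor) -> tf.Tensor:
        # Forward through the truncated model to get hidden-layer features.
        return self.model(x)


base = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
features = HiddenOutput(base, layer=0)(tf.random.normal([8, 4]))  # shape (8, 16)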
Example #3
# Assumes: from tensorflow.keras.layers import Flatten, Input
#          from tensorflow.keras.models import Model
def __init__(
        self,
        model: tf.keras.Model,
        layer: int = -1,
        input_shape: tuple = None,
        flatten: bool = False
) -> None:
    super().__init__()
    if input_shape and not model.inputs:
        # Build a subclassed model that has no symbolic inputs yet.
        inputs = Input(shape=input_shape)
        model.call(inputs)
    else:
        inputs = model.inputs
    # Expose the requested hidden layer as the wrapped model's output.
    self.model = Model(inputs=inputs, outputs=model.layers[layer].output)
    # Optionally flatten that output; tf.identity leaves it unchanged.
    self.flatten = Flatten() if flatten else tf.identity
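Example #3 adds an optional Flatten step. Assuming a call method of the form self.flatten(self.model(x)) (an assumption, as above), the wrapper turns a convolutional feature map into a flat feature vector; the equivalent computation for layer=0 and flatten=True is:

import numpy as np
import tensorflow as tf

base = tf.keras.Sequential([
    tf.keras.layers.Conv2D(4, 3, activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
x = np.random.rand(2, 28, 28, 1).astype(np.float32)

# What the wrapper computes for layer=0, flatten=True:
feature_map = tf.keras.Model(base.inputs, base.layers[0].output)(x)
flat = tf.keras.layers.Flatten()(feature_map)
print(feature_map.shape, flat.shape)  # (2, 26, 26, 4) (2, 2704)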
Example #4
import tensorflow as tf


def train_on_batch(x, y_true, model: tf.keras.Model, optimizer):
    '''
    Train the model on one batch: forward pass, gradient computation,
    and parameter update.
    :param x:           feature batch
    :param y_true:      label batch
    :param model:       model instance
    :param optimizer:   optimizer
    :return:            predictions and loss for the batch
    '''
    # Record the forward pass so that the gradients
    # \partial{loss}/\partial{\theta} can be computed.
    with tf.GradientTape() as tape:
        y_hat = model(x)

        # loss = tf.losses.mean_squared_error(y_true, y_hat)
        loss = tf.losses.binary_crossentropy(y_true, y_hat, from_logits=False)
        # Custom L2 regularization term defined on the model.
        loss += model.l2_norm()

    # Compute the gradients of the loss w.r.t. the trainable variables.
    grads = tape.gradient(loss, sources=model.trainable_variables)

    # Apply the parameter update.
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))

    return y_hat, loss
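To exercise train_on_batch end to end, here is a minimal sketch; the LogisticModel class below is invented for illustration, mainly to supply the custom l2_norm() method the function calls:

import tensorflow as tf


class LogisticModel(tf.keras.Model):
    """Toy binary classifier with a custom L2 regularization term."""

    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(1, activation='sigmoid')

    def call(self, x):
        return self.dense(x)

    def l2_norm(self, lam: float = 1e-3):
        # Sum of L2 penalties over all trainable weights.
        return lam * tf.add_n([tf.nn.l2_loss(v) for v in self.trainable_variables])


model = LogisticModel()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
x = tf.random.normal([32, 5])
y = tf.cast(tf.random.uniform([32, 1]) > 0.5, tf.float32)

for step in range(5):
    y_hat, loss = train_on_batch(x, y, model, optimizer)
    print(step, float(tf.reduce_mean(loss)))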