Example 1
    def _get_keras_model(self) -> models.Model:
        I = layers.Input(shape=(KerasCNNModel.MAX_SEQUENCE_LENGTH, 300),
                         dtype='float32',
                         name='comment_text')

        # Convolutional Layers
        X = I
        for filter_size in self.hparams().filter_sizes:
            X = layers.Conv1D(self.hparams().num_filters,
                              filter_size,
                              activation='relu',
                              padding='same')(X)
        X = layers.GlobalAveragePooling1D()(X)

        # Dense
        for num_units in self.hparams().dense_units:
            X = layers.Dense(num_units, activation='relu')(X)
            X = layers.Dropout(self.hparams().dropout_rate)(X)

        # Outputs
        outputs = []
        for label in self._labels:
            outputs.append(
                layers.Dense(1, activation='sigmoid', name=label)(X))

        model = models.Model(inputs=I, outputs=outputs)
        model.compile(
            optimizer=optimizers.Adam(lr=self.hparams().learning_rate),
            loss='binary_crossentropy',
            metrics=['binary_accuracy', super().roc_auc])

        model.summary(print_fn=tf.logging.info)
        return model
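Because the model above exposes one named sigmoid output per label, Keras expects one target array per output head when training. A minimal usage sketch from inside the same class (the training arrays and their shapes are assumptions, not part of the original snippet):

        # Hypothetical usage: train_texts is a float32 array of shape
        # (num_examples, MAX_SEQUENCE_LENGTH, 300); train_labels maps each
        # label name to a (num_examples,) array of 0/1 targets.
        model = self._get_keras_model()
        model.fit(x=train_texts,
                  y=[train_labels[label] for label in self._labels],
                  batch_size=128,
                  epochs=5)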
Example 2
 def __init__(self, channel, reduction=16):
     super().__init__()
     # Squeeze-and-excitation style gate: pool over time, bottleneck to
     # channel // reduction units, then emit per-channel sigmoid weights.
     self.fc = Sequential()
     self.fc.add(layers.GlobalAveragePooling1D())
     self.fc.add(layers.Dense(channel // reduction, activation='relu'))
     self.fc.add(layers.Dense(channel))
     self.fc.add(layers.Activation('sigmoid'))
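The snippet defines only the constructor of this gate; how the learned weights get applied is not shown. A minimal sketch of a matching call method, assuming a Keras Layer subclass applied to (batch, steps, channels) inputs with tensorflow imported as tf:

 def call(self, inputs):
     # self.fc pools over the time axis and emits per-channel sigmoid
     # weights of shape (batch, channels).
     w = self.fc(inputs)
     # Broadcast the weights over the time axis and rescale the input.
     w = tf.expand_dims(w, axis=1)
     return inputs * w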
Example 3
    def _model_fn(self, features, labels, mode, params, config):
        embedding = tf.Variable(
            tf.truncated_normal([256, params.embedding_size]),
            name='char_embedding')
        texts = features[base_model.TEXT_FEATURE_KEY]
        batch_size = tf.shape(texts)[0]
        # Split each text into individual bytes, densify the result, and
        # decode the raw bytes into int32 ids in [0, 256).
        byte_ids = tf.reshape(
            tf.cast(
                tf.decode_raw(
                    tf.sparse_tensor_to_dense(tf.string_split(texts, ''),
                                              default_value='\0'), tf.uint8),
                tf.int32), [batch_size, -1])
        # Pad with zeros and slice so every example is exactly
        # params.string_len ids long (pad-or-truncate).
        padded_ids = tf.slice(
            tf.concat(
                [byte_ids,
                 tf.zeros([batch_size, params.string_len], tf.int32)],
                axis=1), [0, 0], [batch_size, params.string_len])

        inputs = tf.nn.embedding_lookup(params=embedding, ids=padded_ids)

        # Conv
        X = inputs
        for filter_size in params.filter_sizes:
            X = layers.Conv1D(params.num_filters,
                              filter_size,
                              activation='relu',
                              padding='same')(X)
        if params.pooling_type == 'average':
            X = layers.GlobalAveragePooling1D()(X)
        elif params.pooling_type == 'max':
            X = layers.GlobalMaxPooling1D()(X)
        else:
            raise ValueError('Unrecognized pooling type parameter')

        # FC
        logits = X
        for num_units in params.dense_units:
            logits = tf.layers.dense(inputs=logits,
                                     units=num_units,
                                     activation=tf.nn.relu)
            logits = tf.layers.dropout(logits, rate=params.dropout_rate)

        logits = tf.layers.dense(inputs=logits,
                                 units=len(self._target_labels),
                                 activation=None)

        output_heads = [
            tf.contrib.estimator.binary_classification_head(name=name)
            for name in self._target_labels
        ]
        multihead = tf.contrib.estimator.multi_head(output_heads)

        optimizer = tf.train.AdamOptimizer(learning_rate=params.learning_rate)
        return multihead.create_estimator_spec(features=features,
                                               labels=labels,
                                               mode=mode,
                                               logits=logits,
                                               optimizer=optimizer)
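A _model_fn like this is typically handed to a TF1 Estimator, which binds features, labels, mode, params, and config at train/eval time. A hedged wiring sketch (the hyperparameter values and model_instance are assumptions, not part of the original snippet):

# Hypothetical wiring using the TF1-era HParams container.
hparams = tf.contrib.training.HParams(embedding_size=64,
                                      string_len=500,
                                      filter_sizes=[3, 4, 5],
                                      num_filters=128,
                                      pooling_type='average',
                                      dense_units=[64],
                                      dropout_rate=0.3,
                                      learning_rate=1e-3)
estimator = tf.estimator.Estimator(model_fn=model_instance._model_fn,
                                   params=hparams)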
Example 4
 def __init__(self):
     super(Model, self).__init__()
     self.main = tf.keras.Sequential([
         layers.Embedding(vocab_size, embedding_dim, input_length=maxlen),
         layers.GlobalAveragePooling1D(),
         layers.Dense(16, activation=tf.nn.relu),
         layers.Dense(1, activation=tf.nn.sigmoid)
     ])
Example 5
 def __init__(self, vocab_size, embedding_size, max_sentence_len):
     self.vocab_size = vocab_size
     self.embedding_size = embedding_size
     # create model
     self.model = models.Sequential()
     self.model.add(
         layers.Embedding(vocab_size, embedding_size,
                          input_length=max_sentence_len))
     self.model.add(layers.GlobalAveragePooling1D())
     self.model.add(layers.Dense(units=1, activation='sigmoid'))
     # Choose the loss and optimizer
     self.model.compile(loss='binary_crossentropy',
                        optimizer='adam',
                        metrics=['accuracy'])
     self.model.summary()
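A hedged usage sketch for this wrapper (the class name FastTextClassifier and the data arrays are assumptions; the original snippet does not name the class):

 # Hypothetical usage: x_train holds integer-encoded sequences padded to
 # max_sentence_len; y_train holds 0/1 labels.
 clf = FastTextClassifier(vocab_size=10000,
                          embedding_size=16,
                          max_sentence_len=256)
 clf.model.fit(x_train, y_train, epochs=5, batch_size=512)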
Example 6
    def _model_fn(self, features, labels, mode, params, config):
        inputs = features[base_model.TOKENS_FEATURE_KEY]
        batch_size = tf.shape(inputs)[0]

        # Conv
        X = inputs
        for filter_size in params.filter_sizes:
            X = layers.Conv1D(params.num_filters,
                              filter_size,
                              activation='relu',
                              padding='same')(X)
        if params.pooling_type == 'average':
            X = layers.GlobalAveragePooling1D()(X)
        elif params.pooling_type == 'max':
            X = layers.GlobalMaxPooling1D()(X)
        else:
            raise ValueError('Unrecognized pooling type parameter')

        # FC
        logits = X
        for num_units in params.dense_units:
            logits = tf.layers.dense(inputs=logits,
                                     units=num_units,
                                     activation=tf.nn.relu)
            logits = tf.layers.dropout(logits, rate=params.dropout_rate)

        logits = tf.layers.dense(inputs=logits,
                                 units=len(self._target_labels),
                                 activation=None)

        output_heads = [
            tf.contrib.estimator.binary_classification_head(name=name)
            for name in self._target_labels
        ]
        multihead = tf.contrib.estimator.multi_head(output_heads)

        optimizer = tf.train.AdamOptimizer(learning_rate=params.learning_rate)
        return multihead.create_estimator_spec(features=features,
                                               labels=labels,
                                               mode=mode,
                                               logits=logits,
                                               optimizer=optimizer)
Example 7
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, value=word_index['<PAD>'], padding='post', maxlen=maxlen)

print(train_data[0])

# Build a simple model
'''
    1: The first layer is an Embedding layer. It takes the integer-encoded
       vocabulary and looks up the embedding vector for each word index.
       These vectors are learned as the model trains, and they add a
       dimension to the output array; the resulting shape is
       (batch, sequence, embedding).
    2: Next, a GlobalAveragePooling1D layer returns a fixed-length output
       vector for each example by averaging over the sequence dimension.
       This lets the model handle variable-length input in the simplest
       possible way.
    3: This fixed-length output vector is piped through a fully connected
       (Dense) layer with 16 hidden units.
    4: The last layer is densely connected to a single output node. Using
       the sigmoid activation function, this value is a float between 0
       and 1 representing the probability (or confidence level) that the
       review is positive.
'''

embedding_dim = 16
model = keras.Sequential([
    layers.Embedding(vocab_size, embedding_dim, input_length=maxlen),
    layers.GlobalAveragePooling1D(),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])

model.summary()

# Compile and train the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_data,
                    train_labels,
                    epochs=30,
                    batch_size=512)
To dive deeper into the differences between the Functional API and Model subclassing, you can read [What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://medium.com/tensorflow/what-are-symbolic-and-imperative-apis-in-tensorflow-2-0-dfccecb01021).

## Mix-and-matching different API styles

Importantly, choosing between the Functional API or Model subclassing isn't a binary decision that restricts you to one category of models. All models in the tf.keras API can interact with each other, whether they're Sequential models, Functional models, or subclassed Models/Layers written from scratch.

You can always use a Functional model or Sequential model as part of a subclassed Model/Layer:
"""

units = 32
timesteps = 10
input_dim = 5

# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)


class CustomRNN(layers.Layer):
    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation='tanh')
        self.projection_2 = layers.Dense(units=units, activation='tanh')
        # Our previously-defined Functional model
        self.classifier = model

    def call(self, inputs):
        outputs = []