Example #1
def get_text_model(embedding_size=EMBEDDING_SIZE,
                   input_length=MAX_DOCUMENT_LENGTH):
    # Text branch over a pre-embedded document: three stacked 1-D convolutions,
    # each with a MaxNorm(3) kernel constraint, interleaved with dropout.
    inputs = keras.Input(shape=(input_length, embedding_size))
    x = layers.Dropout(0.1)(inputs)
    x = layers.Convolution1D(
        16,
        kernel_size=4,
        activation='relu',
        strides=1,
        padding='same',
        kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Convolution1D(
        12,
        kernel_size=8,
        activation='relu',
        strides=2,
        padding='same',
        kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Convolution1D(
        8,
        kernel_size=16,
        activation='relu',
        strides=2,
        padding='same',
        kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Flatten()(x)
    # outputs = layers.Dense(2, activation='relu', kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    return keras.Model(inputs, outputs)
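This snippet assumes keras, layers, and constraints are already imported and that EMBEDDING_SIZE and MAX_DOCUMENT_LENGTH are module-level constants. A minimal usage sketch with placeholder values, not the original project's:

from tensorflow import keras
from tensorflow.keras import layers, constraints

EMBEDDING_SIZE = 300         # placeholder; must exist before get_text_model is defined
MAX_DOCUMENT_LENGTH = 256    # placeholder, used as the default input_length

text_model = get_text_model()
text_model.summary()
# With these values the two stride-2 convolutions reduce 256 steps to 64,
# so the flattened output has 64 * 8 = 512 features.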
Example #2
def get_model_head(embedding_size=EMBEDDING_SIZE,
                   input_length=MAX_DOCUMENT_LENGTH,
                   link_embedding_size=LINK_EMBEDDING_SIZE,
                   link_input_length=LINK_INPUT_LENGTH):
    # Classification head: two text branches and a link branch are encoded by
    # the CnnClfEnsemble sub-models, concatenated, and classified through
    # MaxNorm-constrained Dense layers.
    he_inputs = keras.Input(shape=(input_length, embedding_size))
    en_inputs = keras.Input(shape=(input_length, embedding_size))
    link_inputs = keras.Input(shape=(link_input_length,
                                     link_embedding_size))

    he_outputs = CnnClfEnsemble.get_text_model()(he_inputs)
    en_outputs = CnnClfEnsemble.get_text_model()(en_inputs)
    link_outputs = CnnClfEnsemble.get_link_model()(link_inputs)

    x = layers.Concatenate()([he_outputs, en_outputs, link_outputs])
    x = layers.Dense(128,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(64,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    outputs = layers.Dense(
        2,
        activation='softmax',
        kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    return keras.Model(inputs=[he_inputs, en_inputs, link_inputs],
                       outputs=outputs)
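CnnClfEnsemble.get_link_model() and the LINK_* constants are not shown here, so this head cannot be built from the snippet alone. A self-contained sketch of the same fusion pattern, with assumed branch sizes rather than the project's real ones:

from tensorflow import keras
from tensorflow.keras import layers, constraints

text_features = keras.Input(shape=(512,))   # stand-in for a flattened text branch
link_features = keras.Input(shape=(64,))    # stand-in for the link branch
x = layers.Concatenate()([text_features, link_features])
x = layers.Dense(128, activation='relu',
                 kernel_constraint=constraints.MaxNorm(max_value=3))(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(2, activation='softmax',
                       kernel_constraint=constraints.MaxNorm(max_value=3))(x)
head = keras.Model([text_features, link_features], outputs)
head.compile(optimizer='adam', loss='categorical_crossentropy')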
Example #3
def get_mlp_model(self, num_classes, name):
    # Dropout-heavy MLP head; every Dense layer carries a MaxNorm(3) kernel constraint.
    inputs = keras.Input((self.input_length, self.embedding_size))
    x = layers.Dropout(0.5)(inputs)
    x = layers.Dense(128,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(64,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(32,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(num_classes,
                           activation='softmax',
                           kernel_constraint=constraints.MaxNorm(max_value=3))(x)
    return keras.Model(inputs, outputs, name=name)
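All of these examples pair heavy Dropout with MaxNorm kernel constraints, the combination the constraint is typically used for. A tiny standalone check of what MaxNorm(max_value=3) does to a weight matrix (a sketch; Keras applies the constraint along axis 0 by default):

import numpy as np
from tensorflow.keras import constraints

c = constraints.MaxNorm(max_value=3)
w = np.full((4, 1), 10.0)      # a single column whose L2 norm is 20
print(c(w).numpy())            # the column is rescaled so its norm is about 3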
Example #4
def test_cosinedense_reg_constraint():
    layer_test(core.CosineDense,
               kwargs={
                   'units': 3,
                   'kernel_regularizer': regularizers.l2(0.01),
                   'bias_regularizer': regularizers.l1(0.01),
                   'activity_regularizer': regularizers.l2(0.01),
                   'kernel_constraint': constraints.MaxNorm(1),
                   'bias_constraint': constraints.MaxNorm(1)
               },
               input_shape=(3, 2))
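CosineDense and layer_test come from keras-contrib's test utilities. For reference, the same regularizer and constraint keyword arguments wired into a stock Dense layer (a sketch of the argument plumbing only, not the keras-contrib test itself):

from tensorflow import keras
from tensorflow.keras import layers, regularizers, constraints

dense = layers.Dense(3,
                     kernel_regularizer=regularizers.l2(0.01),
                     bias_regularizer=regularizers.l1(0.01),
                     activity_regularizer=regularizers.l2(0.01),
                     kernel_constraint=constraints.MaxNorm(1),
                     bias_constraint=constraints.MaxNorm(1))
model = keras.Sequential([keras.Input(shape=(2,)), dense])
model.compile(optimizer='sgd', loss='mse')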
Example #5
def _set_constraints(self):
    if self.scope in self.specs['maxnorm_scope']:
        constr = k_con.MaxNorm(2.)
        print('Setting constraint for {}, to MaxNorm'.format(self.scope))
    else:
        constr = None
    return constr
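The method returns either a MaxNorm(2.) constraint or None; both are valid values for a layer's kernel_constraint (None simply means unconstrained). A hypothetical use of the returned value:

from tensorflow.keras import layers
from tensorflow.keras import constraints as k_con

constr = k_con.MaxNorm(2.)                                # the maxnorm_scope branch
constrained = layers.Dense(32, kernel_constraint=constr)
unconstrained = layers.Dense(32, kernel_constraint=None)  # the else branch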
Example #6
def __init__(self,
             model_name,
             klass_name,
             embedding_matrix,
             embedding_size=EMBEDDING_SIZE,
             input_length=MAX_DOCUMENT_LENGTH):
    self.klass_name = klass_name
    self.model = models.Sequential(name=f'{model_name}-model')
    # Frozen embedding layer initialized from a pretrained embedding matrix.
    self.model.add(
        layers.Embedding(
            embedding_matrix.shape[0],
            embedding_size,
            input_length=input_length,
            embeddings_initializer=initializers.Constant(embedding_matrix),
            trainable=False))
    # model.add(layers.Embedding(len(tokenizer.word_index)+1, embedding_size, input_length=MAX_DOCUMENT_LENGTH))  # for trainable embedding layer
    self.model.add(layers.Dropout(0.1))
    self.model.add(
        layers.Convolution1D(
            16,
            kernel_size=4,
            activation='relu',
            strides=1,
            padding='same',
            kernel_constraint=constraints.MaxNorm(max_value=3)))
    self.model.add(layers.Dropout(0.5))
    self.model.add(
        layers.Convolution1D(
            12,
            kernel_size=8,
            activation='relu',
            strides=2,
            padding='same',
            kernel_constraint=constraints.MaxNorm(max_value=3)))
    self.model.add(layers.Dropout(0.5))
    self.model.add(
        layers.Convolution1D(
            8,
            kernel_size=16,
            activation='relu',
            strides=2,
            padding='same',
            kernel_constraint=constraints.MaxNorm(max_value=3)))
    self.model.add(layers.Dropout(0.5))
    self.model.add(layers.Flatten())
    self.model.add(
        layers.Dense(128,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3)))
    self.model.add(layers.Dropout(0.5))
    self.model.add(
        layers.Dense(64,
                     activation='relu',
                     kernel_constraint=constraints.MaxNorm(max_value=3)))
    self.model.add(layers.Dropout(0.5))
    self.model.add(
        layers.Dense(2,
                     activation='softmax',
                     kernel_constraint=constraints.MaxNorm(max_value=3)))
    self.model.compile(
        optimizer=optimizers.Adam(),  # learning_rate=0.001
        loss=losses.CategoricalCrossentropy(from_logits=False),
        metrics=[
            metrics.CategoricalAccuracy(),
            metrics.Recall(class_id=0),
            metrics.Precision(class_id=0)
        ])
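The first layer loads a pretrained embedding matrix through a Constant initializer and freezes it. A minimal self-contained sketch of that idea, with random placeholder vectors rather than the project's embeddings:

import numpy as np
from tensorflow.keras import layers, initializers

vocab_size, embedding_size = 10, 4
embedding_matrix = np.random.rand(vocab_size, embedding_size).astype('float32')

emb = layers.Embedding(vocab_size,
                       embedding_size,
                       embeddings_initializer=initializers.Constant(embedding_matrix),
                       trainable=False)
print(emb(np.array([[1, 2, 3]])).shape)   # (1, 3, 4): one sequence of 3 token ids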