def build_model(self, inputs, outputs):
    """Build a multi-kernel-size CNN + Capsule classification model.

    Args:
        inputs: tensor, input of the model (embedding input).
        outputs: tensor, output of the embedding layer.

    Returns:
        None; sets ``self.outputs`` and ``self.model``.
    """
    outputs_spatial = L.SpatialDropout1D(self.dropout_spatial)(outputs)
    conv_pools = []
    # One Conv1D -> Capsule branch per kernel size (n-gram width).
    # Loop variable renamed from `filter` to avoid shadowing the builtin.
    for kernel_size in self.filters_size:
        x = L.Conv1D(
            filters=self.filters_num,
            kernel_size=kernel_size,
            padding="valid",
            kernel_initializer="normal",
            activation="relu",
        )(outputs_spatial)
        capsule = Capsule_bojone(
            num_capsule=self.num_capsule,
            dim_capsule=self.dim_capsule,
            routings=self.routings,
            kernel_size=(kernel_size, 1),
            share_weights=True,
        )(x)
        conv_pools.append(capsule)
    # Merge all capsule branches along the feature axis.
    capsule = L.Concatenate(axis=-1)(conv_pools)
    x = L.Flatten()(capsule)
    x = L.Dropout(self.dropout)(x)
    # Mid dense layer; width clamped between label count (min 64) and embed size.
    x = L.Dense(units=min(max(self.label, 64), self.embed_size),
                activation=self.activate_mid)(x)
    x = L.Dropout(self.dropout)(x)
    # Final dense layer projecting to the label space.
    self.outputs = L.Dense(units=self.label, activation=self.activate_end)(x)
    self.model = M.Model(inputs=inputs, outputs=self.outputs)
    self.model.summary(132)
def call(self, inputs):
    """Apply symmetric zero-padding, then a tanh-activated 1D convolution.

    Padding both ends with (filter_size - 1) zeros makes the subsequent
    "valid" convolution behave as a wide (full) convolution over the
    input sequence.

    Args:
        inputs: tensor; presumably (batch, seq_len, channels) — TODO confirm.

    Returns:
        tensor, the Conv1D output.
    """
    x_input_pad = L.ZeroPadding1D(
        (self.filter_size - 1, self.filter_size - 1))(inputs)
    # NOTE: removed a leftover commented-out duplicate of the `)(x_input_pad)`
    # call that previously sat between the kwargs.
    conv_1d = L.Conv1D(
        filters=self.filter_num,
        kernel_size=self.filter_size,
        strides=1,
        padding="VALID",  # tf.keras normalizes padding case-insensitively
        kernel_initializer="normal",
        activation="tanh",
    )(x_input_pad)
    return conv_1d
def build_model(self, inputs, outputs):
    """Build a CRNN model: multi-kernel-size CNN followed by a Bi-RNN.

    Args:
        inputs: tensor, input of the model.
        outputs: tensor, output of the embedding layer.

    Returns:
        None; sets ``self.outputs`` and ``self.model``.
    """
    # Select the RNN cell class.
    # NOTE(review): the original compared self.rnn_unit (the unit COUNT,
    # passed below as units=) against type strings, so the selection could
    # never match meaningfully and always fell through to GRU. Compare the
    # type string self.rnn_type instead, matching the sibling build_model
    # implementations in this file — confirm the attribute name on the class.
    if self.rnn_type == "LSTM":
        layer_cell = L.LSTM
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = L.CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = L.CuDNNGRU
    else:
        layer_cell = L.GRU
    # Embedding dropout (drops whole feature channels).
    embedding_output_spatial = L.SpatialDropout1D(
        self.dropout_spatial)(outputs)
    # CNN branches, one per kernel size.
    convs = []
    for kernel_size in self.filters_size:
        conv = L.Conv1D(
            self.filters_num,
            kernel_size=kernel_size,
            strides=1,
            padding='SAME',
            kernel_regularizer=keras.regularizers.l2(self.l2),
            bias_regularizer=keras.regularizers.l2(self.l2),
        )(embedding_output_spatial)
        convs.append(conv)
    # Concatenate along the sequence axis ('same' padding keeps lengths equal).
    x = L.Concatenate(axis=1)(convs)
    # Bi-RNN; the original paper uses an LSTM.
    x = L.Bidirectional(
        layer_cell(units=self.rnn_unit,
                   return_sequences=True,
                   activation='relu',
                   kernel_regularizer=keras.regularizers.l2(self.l2),
                   recurrent_regularizer=keras.regularizers.l2(self.l2)))(x)
    x = L.Dropout(self.dropout)(x)
    x = L.Flatten()(x)
    # Mid dense layer; width clamped between label count (min 64) and embed size.
    x = L.Dense(units=min(max(self.label, 64), self.embed_size),
                activation=self.activate_mid)(x)
    x = L.Dropout(self.dropout)(x)
    # Final dense layer projecting to the label space.
    self.outputs = L.Dense(units=self.label, activation=self.activate_end)(x)
    self.model = M.Model(inputs=inputs, outputs=self.outputs)
    self.model.summary(132)
def build_model(self, inputs, outputs):
    """Build a multi-branch CNN + Bi-RNN sequence-labeling model with an
    optional CRF decoding head.

    Args:
        inputs: tensor, input of model
        outputs: tensor, output of model

    Returns:
        None; sets ``self.outputs`` and ``self.model``.
    """
    # Pick the RNN cell class from the configured type (default: GRU).
    if self.rnn_type == "CuDNNLSTM":
        rnn_cell = L.CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        rnn_cell = L.CuDNNGRU
    elif self.rnn_type == "LSTM":
        rnn_cell = L.LSTM
    else:
        rnn_cell = L.GRU
    # One Conv1D -> Bi-RNN -> Dropout branch per n-gram width; max pooling is
    # generally preferred over average pooling for this extraction.
    branch_outputs = []
    for idx, width in enumerate(self.filters_size):
        branch = L.Conv1D(
            name="conv-{0}-{1}".format(idx, width),
            kernel_size=width,
            activation=self.activate_mid,
            filters=self.filters_num,
            padding='same',
        )(outputs)
        branch = L.Bidirectional(
            rnn_cell(
                name="bi-lstm-{0}-{1}".format(idx, width),
                activation=self.activate_mid,
                return_sequences=True,
                units=self.rnn_unit,
            ))(branch)
        branch = L.Dropout(
            rate=self.dropout,
            name="dropout-{0}-{1}".format(idx, width))(branch)
        branch_outputs.append(branch)
    # Merge branches along the feature axis.
    x = L.Concatenate(axis=-1)(branch_outputs)
    x = L.Dropout(self.dropout)(x)
    if self.use_crf:
        # Project to label space, then decode with a CRF layer.
        x = L.Dense(units=self.label, activation=self.activate_end)(x)
        self.CRF = ConditionalRandomField(self.crf_lr_multiplier,
                                          name="crf_bert4keras")
        self.outputs = self.CRF(x)
        self.trans = K.eval(self.CRF.trans).tolist()
        if self.use_onehot:
            self.loss = self.CRF.dense_loss
            self.metrics = [self.CRF.dense_accuracy]
        else:
            self.loss = self.CRF.sparse_loss
            self.metrics = [self.CRF.sparse_accuracy]
    else:
        # Plain per-timestep softmax head.
        self.outputs = L.TimeDistributed(
            L.Dense(units=self.label,
                    activation=self.activate_end,
                    name="dense-output"))(x)
    self.model = M.Model(inputs, self.outputs)
    self.model.summary(132)
def build_model(self, inputs, outputs):
    """Build a dilated-CNN (DGCNN / IDCNN) sequence-labeling model with an
    optional CRF decoding head.

    Args:
        inputs: tensor, input of model
        outputs: tensor, output of model

    Returns:
        None; sets ``self.outputs`` and ``self.model``.
    """
    # One stack of dilated convolutions per kernel size; each stack applies
    # the dilation rates in self.atrous_rates in order (e.g. 1, 2, 4).
    conv_pools = []
    for i, kernel_size in enumerate(self.filters_size):
        conv = L.Conv1D(
            name="conv-{0}-{1}".format(i, kernel_size),
            dilation_rate=self.atrous_rates[0],
            kernel_size=kernel_size,
            activation=self.activate_mid,
            filters=self.filters_num,
            padding="SAME",
        )(outputs)
        for j in range(len(self.atrous_rates) - 1):
            # BUGFIX: the original indexed atrous_rates[j] here, which
            # re-applied rate[0] and never used the last configured rate;
            # atrous_rates[j + 1] applies each dilation rate exactly once.
            conv = L.Conv1D(
                name="conv-{0}-{1}-{2}".format(i, kernel_size, j),
                dilation_rate=self.atrous_rates[j + 1],
                kernel_size=kernel_size,
                activation=self.activate_mid,
                filters=self.filters_num,
                padding="SAME",
            )(conv)
            conv = L.Dropout(
                name="dropout-{0}-{1}-{2}".format(i, kernel_size, j),
                rate=self.dropout,
            )(conv)
        conv_pools.append(conv)
    # Merge stacks along the feature axis.
    x = L.Concatenate(axis=-1)(conv_pools)
    x = L.Dropout(self.dropout)(x)
    if self.use_crf:
        # Project to label space, then decode with a CRF layer.
        x = L.Dense(units=self.label, activation=self.activate_end)(x)
        self.CRF = ConditionalRandomField(self.crf_lr_multiplier,
                                          name="crf_bert4keras")
        self.outputs = self.CRF(x)
        self.trans = K.eval(self.CRF.trans).tolist()
        self.loss = self.CRF.dense_loss if self.use_onehot else self.CRF.sparse_loss
        self.metrics = [
            self.CRF.dense_accuracy if self.use_onehot
            else self.CRF.sparse_accuracy
        ]
    else:
        # Bi-GRU followed by a per-timestep softmax head.
        x = L.Bidirectional(
            L.GRU(
                activation=self.activate_mid,
                return_sequences=True,
                units=self.rnn_unit,
                name="bi-gru",
            ))(x)
        self.outputs = L.TimeDistributed(
            L.Dense(
                activation=self.activate_end,
                name="dense-output",
                units=self.label,
            ))(x)
    self.model = M.Model(inputs, self.outputs)
    self.model.summary(132)