Example 1
    def build_input(self):
        model_inputs = []
        input_embed = []

        # TODO: consider masking
        if self.use_char:
            if self.char_embeddings is not None:
                char_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.char_vocab_size,
                    output_dim=self.char_embed_dim,
                    weights=[self.char_embeddings],
                    trainable=self.char_embed_trainable)
            else:
                char_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.char_vocab_size,
                    output_dim=self.char_embed_dim)
            input_char = tf.keras.layers.Input(shape=(self.max_len, ))
            model_inputs.append(input_char)
            char_embed = char_embedding_layer(input_char)
            input_embed.append(
                tf.keras.layers.SpatialDropout1D(self.dropout)(char_embed))

        if self.use_bert:
            bert_model = build_bert_model(
                config_path=self.bert_config_file,
                checkpoint_path=self.bert_checkpoint_file)
            if not self.bert_trainable:
                # manually set every layer in the BERT model to be non-trainable
                for layer in bert_model.layers:
                    layer.trainable = False
            model_inputs.extend(bert_model.inputs)
            bert_embed = NonMaskingLayer()(bert_model.output)
            input_embed.append(
                tf.keras.layers.SpatialDropout1D(0.2)(bert_embed))

        if self.use_word:
            if self.word_embeddings is not None:
                word_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim,
                    weights=[self.word_embeddings],
                    trainable=self.word_embed_trainable)
            else:
                word_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim)
            input_word = tf.keras.layers.Input(shape=(self.max_len, ))
            model_inputs.append(input_word)
            word_embed = word_embedding_layer(input_word)
            input_embed.append(
                tf.keras.layers.SpatialDropout1D(self.dropout)(word_embed))

        if len(input_embed) > 1:
            input_embed = tf.keras.layers.concatenate(input_embed)
        else:
            input_embed = input_embed[0]
        return model_inputs, input_embed
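
The snippet passes the BERT output through a NonMaskingLayer that it never defines. A minimal sketch of such a layer, assuming the common pass-through implementation (an assumption, not code from the original source):

import tensorflow as tf

class NonMaskingLayer(tf.keras.layers.Layer):
    """Identity layer that consumes and discards the incoming Keras mask.

    Useful when an upstream layer (e.g. BERT) emits a mask that downstream
    layers such as SpatialDropout1D cannot handle.
    """

    def __init__(self, **kwargs):
        super(NonMaskingLayer, self).__init__(**kwargs)
        self.supports_masking = True

    def compute_mask(self, inputs, mask=None):
        # Returning None stops the mask from propagating any further.
        return None

    def call(self, inputs, mask=None):
        return inputs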
Example 2
    def build_input(self):
        model_inputs = []
        input_embed = []

        # TODO: consider masking
        if self.use_char:
            if self.char_embeddings is not None:
                char_embedding_layer = Embedding(
                    input_dim=self.char_vocab_size,
                    output_dim=self.char_embed_dim,
                    weights=[self.char_embeddings],
                    trainable=self.char_embed_trainable)
            else:
                char_embedding_layer = Embedding(
                    input_dim=self.char_vocab_size,
                    output_dim=self.char_embed_dim)
            input_char = Input(shape=(self.max_len, ))
            model_inputs.append(input_char)
            input_embed.append(
                SpatialDropout1D(self.dropout)(
                    char_embedding_layer(input_char)))

        if self.use_bert:
            bert_model = load_trained_model_from_checkpoint(
                self.bert_config_file,
                self.bert_checkpoint_file,
                trainable=self.bert_trainable,
                output_layer_num=1,
                seq_len=self.max_len)
            model_inputs.extend(bert_model.inputs)
            bert_embed = NonMaskingLayer()(bert_model.output)
            input_embed.append(SpatialDropout1D(0.2)(bert_embed))

        if self.use_word:
            if self.word_embeddings is not None:
                word_embedding_layer = Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim,
                    weights=[self.word_embeddings],
                    trainable=self.word_embed_trainable)
            else:
                word_embedding_layer = Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim)
            input_word = Input(shape=(self.max_len, ))
            model_inputs.append(input_word)
            input_embed.append(
                SpatialDropout1D(self.dropout)(
                    word_embedding_layer(input_word)))

        input_embed = concatenate(
            input_embed) if len(input_embed) > 1 else input_embed[0]
        return model_inputs, input_embed
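
This variant loads BERT with keras-bert instead of building it from a config. The imports it presumably relies on look roughly like this (assumed, since the snippet omits them; exact module paths may vary with the Keras version):

from keras.layers import Embedding, Input, SpatialDropout1D, concatenate
from keras_bert import load_trained_model_from_checkpoint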
Example 3
    def build_input(self):
        """Build input placeholder and prepare embedding for ner model.

        Returns: Tuples of 2 tensor:
            1). Input tensor(s), depending whether using multiple inputs;
            2). Embedding tensor, which will be passed to following layers of ner models.

        """
        model_inputs = []
        input_embed = []

        # TODO: consider masking
        if self.use_char:
            if self.char_embeddings is not None:
                char_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.char_vocab_size,
                    output_dim=self.char_embed_dim,
                    weights=[self.char_embeddings],
                    trainable=self.char_embed_trainable)
            else:
                char_embedding_layer = tf.keras.layers.Embedding(input_dim=self.char_vocab_size,
                                                                 output_dim=self.char_embed_dim)
            input_char = tf.keras.layers.Input(shape=(self.max_len,))
            model_inputs.append(input_char)

            char_embed = char_embedding_layer(input_char)
            input_embed.append(tf.keras.layers.SpatialDropout1D(self.dropout)(char_embed))

        if self.use_bert:
            bert_model = build_bert_model(config_path=self.bert_config_file,
                                          checkpoint_path=self.bert_checkpoint_file)
            if not self.bert_trainable:
                # manually set every layer in the BERT model to be non-trainable
                for layer in bert_model.layers:
                    layer.trainable = False

            # BERT takes a sentence pair as input (token ids and segment ids),
            # so extend adds both of its input tensors at once
            model_inputs.extend(bert_model.inputs)
            bert_embed = NonMaskingLayer()(bert_model.output)
            input_embed.append(tf.keras.layers.SpatialDropout1D(0.2)(bert_embed))

        if self.use_word:
            if self.word_embeddings is not None:
                word_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim,
                    weights=[self.word_embeddings],
                    trainable=self.word_embed_trainable)
            else:
                word_embedding_layer = tf.keras.layers.Embedding(input_dim=self.word_vocab_size,
                                                                 output_dim=self.word_embed_dim)
            input_word = tf.keras.layers.Input(shape=(self.max_len,))
            model_inputs.append(input_word)

            word_embed = word_embedding_layer(input_word)
            input_embed.append(tf.keras.layers.SpatialDropout1D(self.dropout)(word_embed))

        if len(input_embed) > 1:
            input_embed = tf.keras.layers.concatenate(input_embed)
        else:
            input_embed = input_embed[0]
        return model_inputs, input_embed
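
As a hedged illustration of how the returned tensors might be consumed (not from the original source; the BiLSTM head and the num_class attribute are assumptions), a follow-up method in the same class could look like:

    def build_model(self):
        model_inputs, input_embed = self.build_input()
        # Illustrative tagger head: a BiLSTM followed by a per-token softmax.
        rnn_output = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(128, return_sequences=True))(input_embed)
        # self.num_class (the number of tag labels) is an assumed attribute.
        tag_probs = tf.keras.layers.Dense(
            self.num_class, activation='softmax')(rnn_output)
        return tf.keras.models.Model(model_inputs, tag_probs)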
Example 4
    def build_input(self):

        # TODO: consider masking
        if self.use_bert_model:
            model_inputs = []
            bert_model = load_trained_model_from_checkpoint(
                self.bert_config_file,
                self.bert_checkpoint_file,
                trainable=self.bert_trainable,
                output_layer_num=self.bert_output_layer_num,
                seq_len=self.max_len)
            input_bert = Input(shape=(self.max_len, ))
            input_seg = Input(shape=(self.max_len, ))
            model_inputs.append(input_bert)
            model_inputs.append(input_seg)
            bert_embed = NonMaskingLayer()(bert_model([input_bert, input_seg]))
            input_embed = SpatialDropout1D(self.dropout)(bert_embed)

            return model_inputs, input_embed

        model_inputs_a = []
        input_embed_a = []
        model_inputs_b = []
        input_embed_b = []

        if self.use_word:
            if self.word_embeddings is not None:
                word_embedding_layer = Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim,
                    weights=[self.word_embeddings],
                    trainable=self.word_embed_trainable)
            else:
                word_embedding_layer = Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim)
            input_word_a = Input(shape=(self.max_len, ))
            model_inputs_a.append(input_word_a)
            input_embed_a.append(
                SpatialDropout1D(self.dropout)(
                    word_embedding_layer(input_word_a)))
            input_word_b = Input(shape=(self.max_len, ))
            model_inputs_b.append(input_word_b)
            input_embed_b.append(
                SpatialDropout1D(self.dropout)(
                    word_embedding_layer(input_word_b)))

            if self.use_char:
                if self.char_embeddings is not None:
                    char_embedding_layer = Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim,
                        weights=[self.char_embeddings],
                        trainable=self.char_embed_trainable)
                else:
                    char_embedding_layer = Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim)
                input_char_a = Input(shape=(self.max_len, self.max_word_len))
                model_inputs_a.append(input_char_a)
                input_char_b = Input(shape=(self.max_len, self.max_word_len))
                model_inputs_b.append(input_char_b)
                char_embed_a, char_embed_b = self.build_char_embedding(
                    char_embedding_layer, input_char_a, input_char_b)
                input_embed_a.append(
                    SpatialDropout1D(self.dropout)(char_embed_a))
                input_embed_b.append(
                    SpatialDropout1D(self.dropout)(char_embed_b))

        else:
            if self.use_char:
                if self.char_embeddings is not None:
                    char_embedding_layer = Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim,
                        weights=[self.char_embeddings],
                        trainable=self.char_embed_trainable)
                else:
                    char_embedding_layer = Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim)
                input_char_a = Input(shape=(self.max_len, ))
                model_inputs_a.append(input_char_a)
                input_embed_a.append(
                    SpatialDropout1D(self.dropout)(
                        char_embedding_layer(input_char_a)))
                input_char_b = Input(shape=(self.max_len, ))
                model_inputs_b.append(input_char_b)
                input_embed_b.append(
                    SpatialDropout1D(self.dropout)(
                        char_embedding_layer(input_char_b)))

            if self.use_bert:
                bert_model = load_trained_model_from_checkpoint(
                    self.bert_config_file,
                    self.bert_checkpoint_file,
                    trainable=self.bert_trainable,
                    output_layer_num=self.bert_output_layer_num,
                    seq_len=self.max_len)
                input_bert_a = Input(shape=(self.max_len, ))
                input_seg_a = Input(shape=(self.max_len, ))
                model_inputs_a.append(input_bert_a)
                model_inputs_a.append(input_seg_a)
                bert_embed_a = NonMaskingLayer()(bert_model(
                    [input_bert_a, input_seg_a]))
                input_embed_a.append(
                    SpatialDropout1D(self.dropout)(bert_embed_a))

                input_bert_b = Input(shape=(self.max_len, ))
                input_seg_b = Input(shape=(self.max_len, ))
                model_inputs_b.append(input_bert_b)
                model_inputs_b.append(input_seg_b)
                bert_embed_b = NonMaskingLayer()(bert_model(
                    [input_bert_b, input_seg_b]))
                input_embed_b.append(
                    SpatialDropout1D(self.dropout)(bert_embed_b))

        input_embed_a = concatenate(input_embed_a) if len(input_embed_a) > 1 \
            else input_embed_a[0]
        input_embed_b = concatenate(input_embed_b) if len(input_embed_b) > 1 \
            else input_embed_b[0]
        return model_inputs_a + model_inputs_b, input_embed_a, input_embed_b
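
This siamese variant calls a build_char_embedding helper that the snippet does not show. A minimal sketch of what it might do, assuming per-word character embeddings max-pooled over the character axis (the pooling choice is an assumption):

    # assumes: from keras.layers import TimeDistributed, GlobalMaxPooling1D
    def build_char_embedding(self, char_embedding_layer, input_char_a,
                             input_char_b):
        def encode(input_char):
            # (batch, max_len, max_word_len) char ids are lifted by the shared
            # embedding to (batch, max_len, max_word_len, char_embed_dim) ...
            char_embed = char_embedding_layer(input_char)
            # ... then max-pooled over characters, giving one vector per word:
            # (batch, max_len, char_embed_dim)
            return TimeDistributed(GlobalMaxPooling1D())(char_embed)
        return encode(input_char_a), encode(input_char_b)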
Example 5
    def build_input(self):

        # TODO: consider masking
        # build input for bert model
        if self.use_bert_model:
            model_inputs = []
            bert_model = build_bert_model(
                config_path=self.bert_config_file,
                checkpoint_path=self.bert_checkpoint_file)
            if not self.bert_trainable:
                # manually set every layer in the BERT model to be non-trainable
                for layer in bert_model.layers:
                    layer.trainable = False

            input_bert = tf.keras.layers.Input(shape=(self.max_len, ))
            input_seg = tf.keras.layers.Input(shape=(self.max_len, ))
            model_inputs.append(input_bert)
            model_inputs.append(input_seg)
            bert_embed = NonMaskingLayer()(bert_model([input_bert, input_seg]))
            input_embed = tf.keras.layers.SpatialDropout1D(
                self.dropout)(bert_embed)

            return model_inputs, input_embed

        model_inputs_a = []
        input_embed_a = []
        model_inputs_b = []
        input_embed_b = []

        if self.use_word:
            # add word input
            if self.word_embeddings is not None:
                word_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim,
                    weights=[self.word_embeddings],
                    trainable=self.word_embed_trainable)
            else:
                word_embedding_layer = tf.keras.layers.Embedding(
                    input_dim=self.word_vocab_size,
                    output_dim=self.word_embed_dim)

            input_word_a = tf.keras.layers.Input(shape=(self.max_len, ))
            model_inputs_a.append(input_word_a)
            input_embed_a.append(
                tf.keras.layers.SpatialDropout1D(self.dropout)(
                    word_embedding_layer(input_word_a)))
            input_word_b = tf.keras.layers.Input(shape=(self.max_len, ))
            model_inputs_b.append(input_word_b)
            input_embed_b.append(
                tf.keras.layers.SpatialDropout1D(self.dropout)(
                    word_embedding_layer(input_word_b)))

            # add char input
            if self.use_char:
                if self.char_embeddings is not None:
                    char_embedding_layer = tf.keras.layers.Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim,
                        weights=[self.char_embeddings],
                        trainable=self.char_embed_trainable)
                else:
                    char_embedding_layer = tf.keras.layers.Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim)

                input_char_a = tf.keras.layers.Input(shape=(self.max_len,
                                                            self.max_word_len))
                model_inputs_a.append(input_char_a)
                input_char_b = tf.keras.layers.Input(shape=(self.max_len,
                                                            self.max_word_len))
                model_inputs_b.append(input_char_b)
                char_embed_a, char_embed_b = self.build_char_embedding(
                    char_embedding_layer, input_char_a, input_char_b)
                input_embed_a.append(
                    tf.keras.layers.SpatialDropout1D(
                        self.dropout)(char_embed_a))
                input_embed_b.append(
                    tf.keras.layers.SpatialDropout1D(
                        self.dropout)(char_embed_b))

        else:
            # add char input
            if self.use_char:
                if self.char_embeddings is not None:
                    char_embedding_layer = tf.keras.layers.Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim,
                        weights=[self.char_embeddings],
                        trainable=self.char_embed_trainable)
                else:
                    char_embedding_layer = tf.keras.layers.Embedding(
                        input_dim=self.char_vocab_size,
                        output_dim=self.char_embed_dim)

                input_char_a = tf.keras.layers.Input(shape=(self.max_len, ))
                model_inputs_a.append(input_char_a)
                input_embed_a.append(
                    tf.keras.layers.SpatialDropout1D(self.dropout)(
                        char_embedding_layer(input_char_a)))
                input_char_b = tf.keras.layers.Input(shape=(self.max_len, ))
                model_inputs_b.append(input_char_b)
                input_embed_b.append(
                    tf.keras.layers.SpatialDropout1D(self.dropout)(
                        char_embedding_layer(input_char_b)))

            # add bert input
            if self.use_bert:
                bert_model = build_bert_model(
                    config_path=self.bert_config_file,
                    checkpoint_path=self.bert_checkpoint_file)
                if not self.bert_trainable:
                    # manually set every layer in the BERT model to be non-trainable
                    for layer in bert_model.layers:
                        layer.trainable = False

                input_bert_a = tf.keras.layers.Input(shape=(self.max_len, ))
                input_seg_a = tf.keras.layers.Input(shape=(self.max_len, ))
                model_inputs_a.append(input_bert_a)
                model_inputs_a.append(input_seg_a)
                bert_embed_a = NonMaskingLayer()(bert_model(
                    [input_bert_a, input_seg_a]))
                input_embed_a.append(
                    tf.keras.layers.SpatialDropout1D(
                        self.dropout)(bert_embed_a))

                input_bert_b = tf.keras.layers.Input(shape=(self.max_len, ))
                input_seg_b = tf.keras.layers.Input(shape=(self.max_len, ))
                model_inputs_b.append(input_bert_b)
                model_inputs_b.append(input_seg_b)
                bert_embed_b = NonMaskingLayer()(bert_model(
                    [input_bert_b, input_seg_b]))
                input_embed_b.append(
                    tf.keras.layers.SpatialDropout1D(
                        self.dropout)(bert_embed_b))

        input_embed_a = tf.keras.layers.concatenate(input_embed_a) if len(input_embed_a) > 1 \
            else input_embed_a[0]
        input_embed_b = tf.keras.layers.concatenate(input_embed_b) if len(input_embed_b) > 1 \
            else input_embed_b[0]
        return model_inputs_a + model_inputs_b, input_embed_a, input_embed_b
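
Since this variant returns two embedding tensors, a matching head has to encode both sides. A hedged sketch of one way to finish the model (the shared BiLSTM encoder and the sigmoid matching head are illustrative, not from the source):

    def build_model(self):
        model_inputs, input_embed_a, input_embed_b = self.build_input()
        # A shared encoder keeps the two branches symmetric (siamese setup).
        shared_encoder = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(128))
        vec_a = shared_encoder(input_embed_a)
        vec_b = shared_encoder(input_embed_b)
        merged = tf.keras.layers.concatenate([vec_a, vec_b])
        # Binary match probability; the head and layer sizes are assumptions.
        match_prob = tf.keras.layers.Dense(1, activation='sigmoid')(merged)
        return tf.keras.models.Model(model_inputs, match_prob)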