Code example #1
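A single-input encoder model config: one tokens field backed by BERTTensorizer.Config(max_seq_len=128), with HuggingFaceBertSentenceEncoder configured as the encoder.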
    class Config(_EncoderBaseModel.Config):
        class BertModelInput(_EncoderBaseModel.Config.ModelInput):
            tokens: BERTTensorizer.Config = BERTTensorizer.Config(max_seq_len=128)

        inputs: BertModelInput = BertModelInput()
        encoder: TransformerSentenceEncoderBase.Config = (
            HuggingFaceBertSentenceEncoder.Config()
        )
Code example #2
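The pairwise counterpart of example #1: tokens1 and tokens2 are tensorized from the text1 and text2 columns, both with max_seq_len=128, and the same HuggingFace BERT sentence encoder is used.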
    class Config(_EncoderPairwiseModel.Config):
        class ModelInput(_EncoderPairwiseModel.Config.ModelInput):
            tokens1: BERTTensorizerBase.Config = BERTTensorizer.Config(
                columns=["text1"], max_seq_len=128)
            tokens2: BERTTensorizerBase.Config = BERTTensorizer.Config(
                columns=["text2"], max_seq_len=128)

        inputs: ModelInput = ModelInput()
        encoder: TransformerSentenceEncoderBase.Config = (
            HuggingFaceBertSentenceEncoder.Config())
Code example #3
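A SQuAD-style question-answering config: SquadForBERTTensorizer (max_seq_len=256) supplies the input, a LabelTensorizer on the has_answer column carries the is_impossible label, and a two-way MLPDecoder feeds a SquadOutputLayer.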
    class Config(NewBertModel.Config):
        class ModelInput(BaseModel.Config.ModelInput):
            squad_input: SquadForBERTTensorizer.Config = SquadForBERTTensorizer.Config(
                max_seq_len=256)
            # is_impossible label
            has_answer: LabelTensorizer.Config = LabelTensorizer.Config(
                column="has_answer")

        inputs: ModelInput = ModelInput()
        encoder: TransformerSentenceEncoderBase.Config = (
            HuggingFaceBertSentenceEncoder.Config()
        )
        decoder: MLPDecoder.Config = MLPDecoder.Config(out_dim=2)
        output_layer: SquadOutputLayer.Config = SquadOutputLayer.Config()
Code example #4
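A pairwise model config on top of BasePairwiseModel: two BERT-tokenized text columns, a labels tensorizer, an NtokensTensorizer for the metric reporter, and a single encoder shared by both inputs (shared_encoder=True).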
    class Config(BasePairwiseModel.Config):
        class ModelInput(ModelInputBase):
            tokens1: BERTTensorizer.Config = BERTTensorizer.Config(
                columns=["text1"], max_seq_len=128)
            tokens2: BERTTensorizer.Config = BERTTensorizer.Config(
                columns=["text2"], max_seq_len=128)
            labels: LabelTensorizer.Config = LabelTensorizer.Config()
            # for metric reporter
            num_tokens: NtokensTensorizer.Config = NtokensTensorizer.Config(
                names=["tokens1", "tokens2"], indexes=[2, 2])

        inputs: ModelInput = ModelInput()
        encoder: TransformerSentenceEncoderBase.Config = (
            HuggingFaceBertSentenceEncoder.Config())
        # use one encoder instance for both tokens1 and tokens2
        shared_encoder: bool = True
Code example #5
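A single-sentence classification config: a BERT-tokenized tokens input, an optional dense-feature tensorizer, labels, and a token-count tensorizer for the metric reporter, wired to an encoder, an MLPDecoder, and a ClassificationOutputLayer.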
    class Config(BaseModel.Config):
        class BertModelInput(BaseModel.Config.ModelInput):
            tokens: BERTTensorizer.Config = BERTTensorizer.Config(
                max_seq_len=128)
            dense: Optional[FloatListTensorizer.Config] = None
            labels: LabelTensorizer.Config = LabelTensorizer.Config()
            # for metric reporter
            num_tokens: NtokensTensorizer.Config = NtokensTensorizer.Config(
                names=["tokens"], indexes=[2])

        inputs: BertModelInput = BertModelInput()
        encoder: TransformerSentenceEncoderBase.Config = (
            HuggingFaceBertSentenceEncoder.Config())
        decoder: MLPDecoder.Config = MLPDecoder.Config()
        output_layer: ClassificationOutputLayer.Config = (
            ClassificationOutputLayer.Config())
Code example #6
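A pairwise classification config similar to example #4, but the token fields are annotated with the BERTTensorizerBase.Config base type and the MLPDecoder is optional; as the inline comment explains, when a decoder is present the two encodings are concatenated before being passed to it.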
    class Config(BasePairwiseModel.Config):
        class ModelInput(ModelInputBase):
            tokens1: BERTTensorizerBase.Config = BERTTensorizer.Config(
                columns=["text1"], max_seq_len=128)
            tokens2: BERTTensorizerBase.Config = BERTTensorizer.Config(
                columns=["text2"], max_seq_len=128)
            labels: LabelTensorizer.Config = LabelTensorizer.Config()
            # for metric reporter
            num_tokens: NtokensTensorizer.Config = NtokensTensorizer.Config(
                names=["tokens1", "tokens2"], indexes=[2, 2])

        inputs: ModelInput = ModelInput()
        encoder: TransformerSentenceEncoderBase.Config = (
            HuggingFaceBertSentenceEncoder.Config())
        # Decoder is a fully connected layer that expects concatenated encodings.
        # So, if decoder is provided we will concatenate the encodings from the
        # encoders and then pass to the decoder.
        decoder: Optional[MLPDecoder.Config] = MLPDecoder.Config()
        shared_encoder: bool = True
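
All six examples rely on the same nested-config pattern: a Config class whose fields are themselves config objects with defaults, so any leaf value can be overridden without restating the rest. The sketch below is not taken from the source; it re-expresses that pattern with plain Python dataclasses, and the class names in it (TensorizerConfig, ModelInputConfig, PairwiseModelConfig) are illustrative only.

    # Minimal, self-contained sketch of the nested-config-with-defaults pattern.
    # Plain dataclasses stand in for the framework's Config classes; all names
    # here are hypothetical and chosen only to mirror the examples above.
    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class TensorizerConfig:
        columns: List[str] = field(default_factory=lambda: ["text"])
        max_seq_len: int = 128

    @dataclass
    class ModelInputConfig:
        tokens1: TensorizerConfig = field(
            default_factory=lambda: TensorizerConfig(columns=["text1"]))
        tokens2: TensorizerConfig = field(
            default_factory=lambda: TensorizerConfig(columns=["text2"]))

    @dataclass
    class PairwiseModelConfig:
        inputs: ModelInputConfig = field(default_factory=ModelInputConfig)
        shared_encoder: bool = True

    # Overriding one leaf field leaves every other default intact.
    cfg = PairwiseModelConfig(
        inputs=ModelInputConfig(
            tokens1=TensorizerConfig(columns=["text1"], max_seq_len=256)))
    assert cfg.inputs.tokens1.max_seq_len == 256
    assert cfg.inputs.tokens2.max_seq_len == 128
    assert cfg.shared_encoder is True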