class Config(BaseModel.Config):
    """Configuration for a word-level LSTM language model.

    Wires tokenized inputs through a word embedding, a unidirectional
    LSTM representation, an optional MLP decoder, and an LM output layer.
    """

    class ModelInput(Model.Config.ModelInput):
        # BOS/EOS markers are added so sentence boundaries appear in the
        # token stream the model trains on.
        tokens: TokenTensorizer.Config = TokenTensorizer.Config(
            add_bos_token=True, add_eos_token=True
        )

    inputs: ModelInput = ModelInput()
    embedding: WordEmbedding.Config = WordEmbedding.Config()
    # NOTE(review): unidirectional by default — presumably because the LM
    # predicts left-to-right; confirm against the training objective.
    representation: BiLSTM.Config = BiLSTM.Config(bidirectional=False)
    decoder: Optional[MLPDecoder.Config] = MLPDecoder.Config()
    output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
    # When True, the decoder shares its projection weights with the
    # input embedding matrix.
    tied_weights: bool = False
    # When True, the LSTM hidden state is carried across batches
    # instead of being reset.
    stateful: bool = False
class Config(BaseModel.Config):
    """Configuration for a language model with a selectable representation.

    Same shape as the LSTM LM config, but the representation may be either
    a (unidirectional) BiLSTM or a CNN, the token tensorizer is optional,
    and a Caffe2 exporter format can be chosen.
    """

    class ModelInput(Model.Config.ModelInput):
        # BOS/EOS markers are added so sentence boundaries appear in the
        # token stream the model trains on.
        tokens: Optional[TokenTensorizer.Config] = TokenTensorizer.Config(
            add_bos_token=True, add_eos_token=True
        )

    inputs: ModelInput = ModelInput()
    embedding: WordEmbedding.Config = WordEmbedding.Config()
    # Defaults to a unidirectional LSTM; a CNN representation is also accepted.
    representation: Union[BiLSTM.Config, CNN.Config] = BiLSTM.Config(
        bidirectional=False
    )
    decoder: Optional[MLPDecoder.Config] = MLPDecoder.Config()
    output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
    # When True, the decoder shares its projection weights with the
    # input embedding matrix.
    tied_weights: bool = False
    # When True, the LSTM hidden state is carried across batches
    # instead of being reset.
    stateful: bool = False
    # Export format used when serializing the model to Caffe2.
    caffe2_format: ExporterType = ExporterType.PREDICTOR
class Config(BaseModel.Config):
    """Configuration for a masked language model over a transformer encoder.

    Tokens are produced by a BERT-style tensorizer, encoded by a transformer
    sentence encoder, decoded through an MLP, and scored by the LM output
    layer under a configurable masking scheme.
    """

    class InputConfig(ConfigBase):
        # Sequences are truncated/padded to at most 128 tokens by default.
        tokens: BERTTensorizer.Config = BERTTensorizer.Config(max_seq_len=128)

    inputs: InputConfig = InputConfig()
    # Annotated with the encoder base config but defaulted to the concrete
    # TransformerSentenceEncoder config, so other encoder subclasses can be
    # substituted via configuration.
    encoder: TransformerSentenceEncoderBase.Config = (
        TransformerSentenceEncoder.Config()
    )
    decoder: MLPDecoder.Config = MLPDecoder.Config()
    output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
    # Fraction of tokens selected for masking.
    mask_prob: float = 0.15
    # Whether the BOS token is eligible for masking.
    mask_bos: bool = False
    # masking
    masking_strategy: MaskingStrategy = MaskingStrategy.RANDOM
    # tie weights determines whether the input embedding weights are used
    # in the output vocabulary projection as well
    tie_weights: bool = True
class Config(ConfigBase):
    """
    Configuration class for `LMLSTM`.

    Attributes:
        representation (BiLSTM.Config): Config for the BiLSTM representation.
        decoder (MLPDecoder.Config): Config for the MLP Decoder.
        output_layer (LMOutputLayer.Config): Config for the language model
            output layer.
        tied_weights (bool): If `True` use a common weights matrix between
            the word embeddings and the decoder. Defaults to `False`.
        stateful (bool): If `True`, do not reset hidden state of LSTM
            across batches.
    """

    representation: BiLSTM.Config = BiLSTM.Config(bidirectional=False)
    decoder: MLPDecoder.Config = MLPDecoder.Config()
    output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
    tied_weights: bool = False
    stateful: bool = False