    def test_basic(self):
        # Setup embedding
        num_embeddings = 5
        lstm_dim = 8
        embedding_module = WordSeqEmbedding(
            lstm_config=BiLSTM.Config(lstm_dim=lstm_dim,
                                      num_layers=2,
                                      bidirectional=True),
            num_embeddings=num_embeddings,
            word_embed_dim=4,
            embeddings_weight=None,
            init_range=[-1, 1],
            unk_token_idx=4,
        )
        # Bidirectional LSTM, so the output dimension is twice lstm_dim
        output_dim = lstm_dim * 2
        self.assertEqual(embedding_module.embedding_dim, output_dim)

        # Check output shape
        input_batch_size, max_seq_len, max_token_count = 4, 3, 5
        token_seq_idx = torch.randint(
            low=0,
            high=num_embeddings,
            size=[input_batch_size, max_seq_len, max_token_count],
        )
        seq_token_count = torch.randint(low=1,
                                        high=max_token_count,
                                        size=[input_batch_size, max_seq_len])
        output_embedding = embedding_module(token_seq_idx, seq_token_count)

        expected_output_dims = [input_batch_size, max_seq_len, output_dim]
        self.assertEqual(list(output_embedding.size()), expected_output_dims)
Example #2
 class Config(ConfigBase):
     version: int = 0
     lstm: BiLSTM.Config = BiLSTM.Config()
     ablation: AblationParams = AblationParams()
     constraints: RNNGConstraints = RNNGConstraints()
     max_open_NT: int = 10
     dropout: float = 0.1
     compositional_type: CompositionalType = CompositionalType.BLSTM
Example #3
    class Config(ConfigBase):
        class CompositionalType(Enum):
            """Whether to use summation of the vectors or a BiLSTM based composition to
             generate embedding for a subtree"""

            BLSTM = "blstm"
            SUM = "sum"

        class AblationParams(ConfigBase):
            """Ablation parameters.

            Attributes:
                use_buffer (bool): whether to use the buffer LSTM
                use_stack (bool): whether to use the stack LSTM
                use_action (bool): whether to use the action LSTM
                use_last_open_NT_feature (bool): whether to use the last open
                    non-terminal as a 1-hot feature when computing the
                    representation for the action classifier
            """

            use_buffer: bool = True
            use_stack: bool = True
            use_action: bool = True
            use_last_open_NT_feature: bool = False

        class RNNGConstraints(ConfigBase):
            """Constraints when computing valid actions.

            Attributes:
                intent_slot_nesting (bool): for intent-slot models, the top-level
                    non-terminal has to be an intent; an intent can only have slot
                    non-terminals as children, and vice versa.
                ignore_loss_for_unsupported (bool): if the data has an "unsupported"
                    label, i.e. the label contains the substring "unsupported", do
                    not compute the loss.
                no_slots_inside_unsupported (bool): if the data has an "unsupported"
                    label, i.e. the label contains the substring "unsupported", do
                    not predict slots inside this label.
            """

            intent_slot_nesting: bool = True
            ignore_loss_for_unsupported: bool = False
            no_slots_inside_unsupported: bool = True

        # version 0 - initial implementation
        # version 1 - beam search
        # version 2 - use zero init state rather than random
        # version 3 - add beam search input params
        version: int = 2
        lstm: BiLSTM.Config = BiLSTM.Config()
        ablation: AblationParams = AblationParams()
        constraints: RNNGConstraints = RNNGConstraints()
        max_open_NT: int = 10
        dropout: float = 0.1
        beam_size: int = 1
        top_k: int = 1
        compositional_type: CompositionalType = CompositionalType.BLSTM
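
The docstrings above spell out what the compositional type, the ablation flags, and the constraints control. As Example #4 below suggests, this Config belongs to `RNNGParser`, and like every `ConfigBase` subclass in this listing it can be constructed with keyword arguments so that individual fields override the defaults. A minimal sketch, assuming `RNNGParser` is importable from pytext's RNNG parser module (the import path may differ between pytext versions):

# Assumed import path; adjust to your pytext version.
from pytext.models.semantic_parsers.rnng.rnng_parser import RNNGParser

parser_config = RNNGParser.Config(
    dropout=0.2,
    beam_size=3,  # beam search inputs (see the version notes above)
    top_k=3,
    compositional_type=RNNGParser.Config.CompositionalType.SUM,  # sum instead of BiLSTM composition
    ablation=RNNGParser.Config.AblationParams(use_last_open_NT_feature=True),
    constraints=RNNGParser.Config.RNNGConstraints(ignore_loss_for_unsupported=True),
)
# Fields that are not passed (lstm, max_open_NT, version, ...) keep the defaults shown above.
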
Example #4
File: model.py Project: shreydesai/pytext
 class Config(ConfigBase):
     lstm: BiLSTM.Config = BiLSTM.Config()
     ablation: RNNGParser.Config.AblationParams = (
         RNNGParser.Config.AblationParams())
     constraints: RNNGParser.Config.RNNGConstraints = (
         RNNGParser.Config.RNNGConstraints())
     max_open_NT: int = 10
     dropout: float = 0.1
     compositional_type: RNNGParser.Config.CompositionalType = (
         RNNGParser.Config.CompositionalType.BLSTM)
Example #5
    class Config(BaseModel.Config):
        class ModelInput(Model.Config.ModelInput):
            tokens: TokenTensorizer.Config = TokenTensorizer.Config(
                add_bos_token=True, add_eos_token=True)

        inputs: ModelInput = ModelInput()
        embedding: WordEmbedding.Config = WordEmbedding.Config()
        representation: BiLSTM.Config = BiLSTM.Config(bidirectional=False)
        decoder: Optional[MLPDecoder.Config] = MLPDecoder.Config()
        output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
        tied_weights: bool = False
        stateful: bool = False
Example #6
    class Config(BaseModel.Config):
        class ModelInput(Model.Config.ModelInput):
            tokens: Optional[TokenTensorizer.Config] = TokenTensorizer.Config(
                add_bos_token=True, add_eos_token=True)

        inputs: ModelInput = ModelInput()
        embedding: WordEmbedding.Config = WordEmbedding.Config()
        representation: Union[BiLSTM.Config,
                              CNN.Config] = BiLSTM.Config(bidirectional=False)
        decoder: Optional[MLPDecoder.Config] = MLPDecoder.Config()
        output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
        tied_weights: bool = False
        stateful: bool = False
        caffe2_format: ExporterType = ExporterType.PREDICTOR
Example #7
    class Config(RepresentationBase.Config):
        """
        Configuration class for `BiLSTMDocAttention`.

        Attributes:
            dropout (float): Dropout probability to use. Defaults to 0.4.
            lstm (BiLSTM.Config): Config for the BiLSTM.
            pooling (ConfigBase): Config for the underlying pooling module.
            mlp_decoder (MLPDecoder.Config): Config for the non-linear
                projection module.
        """

        dropout: float = 0.4
        lstm: BiLSTM.Config = BiLSTM.Config()
        pooling: Union[SelfAttention.Config, MaxPool.Config, MeanPool.Config,
                       NoPool.Config] = SelfAttention.Config()
        mlp_decoder: Optional[MLPDecoder.Config] = None
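
The docstring above describes a representation built from a BiLSTM, a configurable pooling module, and an optional MLP projection; the pooling field accepts any member of the `Union`. A minimal sketch of overriding it, assuming the enclosing class is pytext's `BiLSTMDocAttention` and that the imports below match your pytext version:

# Assumed class name and import paths.
from pytext.models.representations.bilstm import BiLSTM
from pytext.models.representations.bilstm_doc_attention import BiLSTMDocAttention
from pytext.models.representations.pooling import MaxPool

rep_config = BiLSTMDocAttention.Config(
    dropout=0.3,
    lstm=BiLSTM.Config(lstm_dim=64, num_layers=1, bidirectional=True),
    pooling=MaxPool.Config(),  # any option from the Union: SelfAttention, MaxPool, MeanPool, NoPool
    mlp_decoder=None,          # keep the default: no extra non-linear projection
)
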
Example #8
    class Config(EmbeddingBase.Config):
        word_embed_dim: int = 100
        embedding_init_strategy: EmbedInitStrategy = EmbedInitStrategy.RANDOM
        embedding_init_range: Optional[List[float]] = None
        embeddding_init_std: Optional[float] = 0.02
        padding_idx: Optional[int] = None

        lstm: BiLSTM.Config = BiLSTM.Config()

        # [BEGIN] pretrained embedding related config
        pretrained_embeddings_path: str = ""
        #: If `pretrained_embeddings_path` and `vocab_from_pretrained_embeddings` are set,
        #: only the first `vocab_size` tokens in the file will be added to the vocab.
        vocab_size: int = 0
        lowercase_tokens: bool = True
        skip_header: bool = True
        delimiter: str = " "
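
The inline comments above describe how the pretrained-embedding fields interact: when `pretrained_embeddings_path` (and `vocab_from_pretrained_embeddings`) are set, `vocab_size` caps how many tokens from the file are added to the vocab, while `lowercase_tokens`, `skip_header`, and `delimiter` control how the file is read. A minimal sketch, assuming this is `WordSeqEmbedding.Config` (consistent with the `WordSeqEmbedding` test snippets elsewhere in this listing) and using a purely illustrative file path:

# Assumed enclosing class; the embeddings path below is hypothetical.
seq_embed_config = WordSeqEmbedding.Config(
    word_embed_dim=300,
    lstm=BiLSTM.Config(lstm_dim=8, num_layers=2, bidirectional=True),
    pretrained_embeddings_path="/data/embeddings/glove.txt",  # hypothetical path
    vocab_size=50000,        # only the first 50k tokens in the file enter the vocab
    lowercase_tokens=True,
    skip_header=True,
    delimiter=" ",
)
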
Example #9
    class Config(ConfigBase):
        """
        Configuration class for `LMLSTM`.

        Attributes:
            representation (BiLSTM.Config): Config for the BiLSTM representation.
            decoder (MLPDecoder.Config): Config for the MLP Decoder.
            output_layer (LMOutputLayer.Config): Config for the language model
                output layer.
            tied_weights (bool): If `True` use a common weights matrix between
                the word embeddings and the decoder. Defaults to `False`.
            stateful (bool): If `True`, do not reset the hidden state of the
                LSTM across batches.
        """

        representation: BiLSTM.Config = BiLSTM.Config(bidirectional=False)
        decoder: MLPDecoder.Config = MLPDecoder.Config()
        output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
        tied_weights: bool = False
        stateful: bool = False
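
The docstring above explains `tied_weights` (share one weight matrix between the word embedding and the decoder) and `stateful` (carry the LSTM hidden state across batches instead of resetting it). A minimal sketch of overriding both, assuming this Config belongs to pytext's `LMLSTM` language model, as its title says, and that the import paths match your pytext version; note that weight tying generally requires the embedding dimension to equal the representation's output dimension:

# Assumed import paths.
from pytext.models.language_models.lmlstm import LMLSTM
from pytext.models.representations.bilstm import BiLSTM

lm_config = LMLSTM.Config(
    representation=BiLSTM.Config(lstm_dim=512, num_layers=2, bidirectional=False),
    tied_weights=True,  # share the weight matrix between embedding and decoder
    stateful=True,      # keep the hidden state across batches
)
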
Example #10
 def test_onnx_export(self):
     # Setup embedding
     num_embeddings = 5
     lstm_dim = 8
     embedding_module = WordSeqEmbedding(
         lstm_config=BiLSTM.Config(lstm_dim=lstm_dim,
                                   num_layers=2,
                                   bidirectional=True),
         num_embeddings=num_embeddings,
         word_embed_dim=4,
         embeddings_weight=None,
         init_range=[-1, 1],
         unk_token_idx=4,
     )
     input_batch_size, max_seq_len, max_token_count = 1, 3, 5
     seq_token_idx = torch.randint(
         low=0,
         high=num_embeddings,
         size=[input_batch_size, max_seq_len, max_token_count],
     )
     seq_token_count = torch.randint(low=1,
                                     high=max_token_count,
                                     size=[input_batch_size, max_seq_len])
     dummy_inputs = (seq_token_idx, seq_token_count)
     with tempfile.TemporaryFile() as tmp_file:
         with torch.no_grad():
             torch.onnx._export(
                 embedding_module,
                 dummy_inputs,
                 tmp_file,
                 input_names=["seq_token_idx", "seq_token_count"],
                 output_names=["embedding"],
                 export_params=True,
                 operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                 opset_version=9,
                 export_type=ExportTypes.ZIP_ARCHIVE,
             )
         # Make sure Caffe2 can load the exported archive
         caffe2_backend.prepare_zip_archive(tmp_file)
Example #11
 class Config(RepresentationBase.Config):
     dropout: float = 0.4
     lstm: BiLSTM.Config = BiLSTM.Config()
     slot_attention: SlotAttention.Config = SlotAttention.Config()
     mlp_decoder: Optional[MLPDecoder.Config] = None