def __init__(
        self,
        vocab: Vocabulary,
        sequence_field_embedder: TextFieldEmbedder,
        structure_field_embedder: TextFieldEmbedder,
        predicted_loop_type_field_embedder: TextFieldEmbedder,
        seq2seq_encoder: Seq2SeqEncoder,
        loss: Loss,
        structure_field_attention: Optional[MatrixAttention] = None,
        predicted_loop_type_field_attention: Optional[MatrixAttention] = None,
        masked_lm: Optional[Model] = None,  # MaskedLanguageModel
        lm_is_trainable: bool = False,
        lm_matrix_attention: Optional[MatrixAttention] = None,
        lm_dropout: float = 0.0,
        emb_dropout: float = 0.0,
        bpps_aggregator: Optional[Aggregator] = None,
        bpp_dropout: float = 0.0,
        regularizer: RegularizerApplicator = None,
    ) -> None:
        super().__init__(vocab, regularizer)
        # embedders
        self._sequence_field_embedder = sequence_field_embedder
        self._structure_field_embedder = structure_field_embedder
        self._predicted_loop_type_field_embedder = predicted_loop_type_field_embedder

        self._seq2seq_encoder = seq2seq_encoder
        self._loss = loss

        self._structure_field_attention = structure_field_attention
        self._predicted_loop_type_field_attention = predicted_loop_type_field_attention

        # masked language models
        self._masked_lm = masked_lm
        if self._masked_lm is not None:
            self._masked_lm._tokens_masker = None
        if not lm_is_trainable and self._masked_lm is not None:
            self._masked_lm = self._masked_lm.eval()
        self._lm_matrix_attention = lm_matrix_attention

        self._lm_dropout = InputVariationalDropout(p=lm_dropout)
        self._emb_dropout = InputVariationalDropout(p=emb_dropout)

        self._bpps_aggregator = bpps_aggregator
        self._bpp_dropout = InputVariationalDropout(p=bpp_dropout)

        hidden_dim = self._seq2seq_encoder.get_output_dim()
        if self._masked_lm is not None:
            hidden_dim += self._masked_lm.get_output_dim()

        # we predict reactivity, deg_Mg_pH10, deg_Mg_50C, deg_pH10, deg_50C
        self._linear = torch.nn.Linear(hidden_dim, 5)
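Every constructor on this page routes some tensor through AllenNLP's InputVariationalDropout. As a point of reference, here is a minimal standalone sketch, assuming a recent AllenNLP release that exports InputVariationalDropout from allennlp.modules: the module samples a single dropout mask per sequence and reuses it at every timestep of a (batch, timesteps, dim) tensor, unlike torch.nn.Dropout, which samples an independent mask per element.

import torch
from allennlp.modules import InputVariationalDropout

variational_dropout = InputVariationalDropout(p=0.5)
embedded = torch.randn(2, 7, 300)        # (batch_size, num_timesteps, embedding_dim)
dropped = variational_dropout(embedded)  # the same mask is applied at every timestep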
Example #2
    def __init__(self, config: ParsingConfig):
        assert isinstance(config, ParsingConfig)
        super().__init__(config)
        self.config = config
        encoder_dim = config.decoder_config.output_dim

        if self.config.use_pos:
            self.pos_embedding = nn.Embedding(config.num_pos,
                                              config.pos_dim,
                                              padding_idx=0)
            encoder_dim += config.pos_dim

        self.head_arc_feedforward = FeedForward(encoder_dim, 1, config.arc_dim,
                                                Activation.by_name("elu")())
        self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)

        self.arc_attention = BilinearMatrixAttention(config.arc_dim,
                                                     config.arc_dim,
                                                     use_input_biases=True)

        self.head_tag_feedforward = FeedForward(encoder_dim, 1, config.tag_dim,
                                                Activation.by_name("elu")())
        self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)

        self.tag_bilinear = torch.nn.modules.Bilinear(config.tag_dim,
                                                      config.tag_dim,
                                                      config.num_labels)
        self.dropout = InputVariationalDropout(config.dropout)
        self.use_mst_decoding_for_validation = config.use_mst_decoding_for_validation
Example #3
    def __init__(self, vocab: Vocabulary, token_embedder: TokenEmbedder,
                 num_labels: int) -> None:
        super().__init__(vocab)

        self.word_embedders = BasicTextFieldEmbedder(
            {"tokens": token_embedder})
        self._encoder = LstmSeq2SeqEncoder(300, 300, 2)
        # self._encoder = PytorchTransformer(300, 3, 300, 4)

        self._matrix_attention = DotProductMatrixAttention()
        self._projection_feedforward = FeedForward(300 * 4, 1, 300,
                                                   torch.nn.ReLU(), 0.2)

        self._inference_encoder = LstmSeq2SeqEncoder(300, 300, 2)
        # self._inference_encoder = PytorchTransformer(300, 3, 300, 4)

        self.dropout = torch.nn.Dropout(0.3)
        self.rnn_input_dropout = InputVariationalDropout(0.3)

        self._output_feedforward = FeedForward(1200, 1, 300, torch.nn.ReLU(),
                                               0.2)
        self._output_logit = FeedForward(300, 1, num_labels, lambda x: x)

        self._num_labels = num_labels

        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
Example #4
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 phrase_layer: Seq2SeqEncoder,
                 pq_attention: MatrixAttention,
                 p_selfattention: MatrixAttention,
                 supports_pooling: Seq2VecEncoder,
                 query_pooling: Seq2VecEncoder,
                 candidates_pooling: Seq2VecEncoder,
                 decoder: Decoder,
                 dropout: float = 0.2,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(MultiStepParaRankModel, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder

        self.phrase_layer = phrase_layer

        self.pq_attention = pq_attention
        self.p_selfattention = p_selfattention

        self.supports_pooling = supports_pooling
        self.query_pooling = query_pooling
        self.candidates_pooling = candidates_pooling

        self.decoder = decoder

        self.dropout = InputVariationalDropout(p=dropout)

        self._support_accuracy = Auc()
        self._candidate_accuracy = CategoricalAccuracy()

        initializer(self)
Example #5
    def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        encoder: Seq2SeqEncoder,
        matrix_attention: MatrixAttention,
        projection_feedforward: FeedForward,
        inference_encoder: Seq2SeqEncoder,
        output_feedforward: FeedForward,
        output_logit: FeedForward,
        dropout: float = 0.5,
        initializer: InitializerApplicator = InitializerApplicator(),
        **kwargs,
    ) -> None:
        super().__init__(vocab, **kwargs)

        self._text_field_embedder = text_field_embedder
        self._encoder = encoder

        self._matrix_attention = matrix_attention
        self._projection_feedforward = projection_feedforward

        self._inference_encoder = inference_encoder

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None

        self._output_feedforward = output_feedforward
        self._output_logit = output_logit

        self._num_labels = vocab.get_vocab_size(namespace="labels")

        check_dimensions_match(
            text_field_embedder.get_output_dim(),
            encoder.get_input_dim(),
            "text field embedding dim",
            "encoder input dim",
        )
        check_dimensions_match(
            encoder.get_output_dim() * 4,
            projection_feedforward.get_input_dim(),
            "encoder output dim",
            "projection feedforward input",
        )
        check_dimensions_match(
            projection_feedforward.get_output_dim(),
            inference_encoder.get_input_dim(),
            "proj feedforward output dim",
            "inference lstm input dim",
        )

        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        self._debug = 2

        initializer(self)
Example #6
    def __init__(self,
                 config,
                 num_labels: int,
                 num_pos: int,
                 use_pos: bool,
                 arc_representation_dim: int,
                 arc_feedforward: FeedForward = None,
                 use_mst_decoding_for_validation: bool = True,
                 dropout: float = 0.) -> None:
        super(DistanceDependencyParser, self).__init__(config)
        self.bert = BertModel(config)
        self.apply(self.init_bert_weights)

        encoder_dim = config.hidden_size

        self.arc_feedforward = arc_feedforward or \
                                    FeedForward(encoder_dim, 1,
                                                arc_representation_dim,
                                                Activation.by_name("linear")())

        self.arc_attention = DistanceAttention()

        self._dropout = InputVariationalDropout(dropout)

        self.use_mst_decoding_for_validation = use_mst_decoding_for_validation

        self._attachment_scores = UndirectedAttachmentScores()
Example #7
    def __init__(self, device):
        super(UnbiasedTopicalExtractorELMo, self).__init__()
        self.device = device
        self.tokenizer = WordTokenizer()
        options_file = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
        weight_file = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
        self.elmo_model = Elmo(options_file,
                               weight_file,
                               1,
                               do_layer_norm=False,
                               dropout=0.5)
        self.rnn_input_dropout = InputVariationalDropout(0.0)
        self.encoder = _Seq2SeqWrapper(torch.nn.LSTM)(input_size=1024,
                                                      hidden_size=300,
                                                      num_layers=1,
                                                      bidirectional=True)

        self._feature_feedforward_layer = torch.nn.Linear(600, 300)
        self._feature_feedforward_dropout = torch.nn.Dropout(0.5)
        self._feature_feedforward_activation = torch.nn.ReLU()
        self._class_classification_layer = torch.nn.Linear(300, 2)
        self._group_classification_layer = torch.nn.Linear(300, 2)
        init.xavier_uniform_(self._feature_feedforward_layer.weight)
        init.zeros_(self._feature_feedforward_layer.bias)
        init.xavier_uniform_(self._class_classification_layer.weight)
        init.zeros_(self._class_classification_layer.bias)
        init.xavier_uniform_(self._group_classification_layer.weight)
        init.zeros_(self._group_classification_layer.bias)
        self._class_loss = torch.nn.CrossEntropyLoss()
        self._group_loss = torch.nn.CrossEntropyLoss()
Example #8
    def __init__(self,
                    input_size: int,
                    hidden_size: int,
                    decoder_layer: MisoTransformerDecoderLayer, 
                    num_layers: int,
                    source_attention_layer: AttentionLayer,
                    target_attention_layer: AttentionLayer,
                    norm=None,
                    dropout=0.1,
                    use_coverage=True):
        super(MisoTransformerDecoder, self).__init__()

        self.input_proj_layer = torch.nn.Linear(input_size, hidden_size)

        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.dropout = InputVariationalDropout(dropout)
        self.source_attn_layer = source_attention_layer
        self.target_attn_layer = target_attention_layer
        self.use_coverage = use_coverage

        self.prenorm = isinstance(decoder_layer, MisoPreNormTransformerDecoderLayer)

        if self.prenorm:
            self.final_norm = copy.deepcopy(decoder_layer.norm4)
Example #9
    def __init__(self, device, vocab_dir):
        super(UnbiasedTopicalExtractorGloVe, self).__init__()
        self.device = device
        self.vocab = Vocabulary.from_files(vocab_dir)
        self.embedding = Embedding(
            embedding_dim=300,
            trainable=True,
            num_embeddings=self.vocab.get_vocab_size("tokens"),
            pretrained_file=
            "https://allennlp.s3.amazonaws.com/datasets/glove/glove.840B.300d.txt.gz"
        )
        self.tokenizer = WordTokenizer()
        self.token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
        self.rnn_input_dropout = InputVariationalDropout(0.5)
        self.encoder = _Seq2SeqWrapper(torch.nn.LSTM)(input_size=300,
                                                      hidden_size=300,
                                                      num_layers=1,
                                                      bidirectional=True)

        self._feature_feedforward_layer = torch.nn.Linear(600, 300)
        self._feature_feedforward_dropout = torch.nn.Dropout(0.5)
        self._feature_feedforward_activation = torch.nn.ReLU()
        self._class_classification_layer = torch.nn.Linear(300, 2)
        self._group_classification_layer = torch.nn.Linear(300, 2)
        init.xavier_uniform_(self._feature_feedforward_layer.weight)
        init.zeros_(self._feature_feedforward_layer.bias)
        init.xavier_uniform_(self._class_classification_layer.weight)
        init.zeros_(self._class_classification_layer.bias)
        init.xavier_uniform_(self._group_classification_layer.weight)
        init.zeros_(self._group_classification_layer.bias)
        self._class_loss = torch.nn.CrossEntropyLoss()
        self._group_loss = torch.nn.CrossEntropyLoss()
Example #10
    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 dropout: float = 0.0,
                 input_dropout: float = 0.0,
                 label_namespace: str = "pos",
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(PosTaggerMonolingual, self).__init__(vocab, regularizer)

        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        self._dropout = InputVariationalDropout(dropout)
        self._input_dropout = Dropout(input_dropout)
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))
        
        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")

        self.metrics = {
                "accuracy": CategoricalAccuracy(),
                "accuracy3": CategoricalAccuracy(top_k=3)
        }

        initializer(self)
Example #11
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 output_logit: FeedForward,
                 dropout: float = 0.5,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder
        self._encoder = encoder

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None

        self._output_feedforward = output_feedforward
        self._output_logit = output_logit

        self._num_labels = vocab.get_vocab_size(namespace="labels")

        check_dimensions_match(text_field_embedder.get_output_dim(),
                               encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")

        self._f1 = F1Measure(
            positive_label=vocab.get_token_index("1", namespace="labels"))
        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self)
Example #12
    def __init__(
            self,
            vocab: Vocabulary,
            text_field_embedder: TextFieldEmbedder,
            encoder: Seq2SeqEncoder,
            edge_model: graph_dependency_parser.components.edge_models.EdgeModel,
            loss_function: graph_dependency_parser.components.losses.EdgeLoss,
            pos_tag_embedding: Embedding = None,
            use_mst_decoding_for_validation: bool = True,
            dropout: float = 0.0,
            input_dropout: float = 0.0,
            initializer: InitializerApplicator = InitializerApplicator(),
            regularizer: Optional[RegularizerApplicator] = None,
            validation_evaluator: Optional[ValidationEvaluator] = None
    ) -> None:
        super(GraphDependencyParser, self).__init__(vocab, regularizer)

        self.validation_evaluator = validation_evaluator

        self.text_field_embedder = text_field_embedder
        self.encoder = encoder

        self._pos_tag_embedding = pos_tag_embedding or None
        self._dropout = InputVariationalDropout(dropout)
        self._input_dropout = Dropout(input_dropout)
        self._head_sentinel = torch.nn.Parameter(
            torch.randn([1, 1, encoder.get_output_dim()]))

        representation_dim = text_field_embedder.get_output_dim()
        if pos_tag_embedding is not None:
            representation_dim += pos_tag_embedding.get_output_dim()

        check_dimensions_match(representation_dim, encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")
        check_dimensions_match(encoder.get_output_dim(),
                               edge_model.encoder_dim(), "encoder output dim",
                               "input dim edge model")

        self.use_mst_decoding_for_validation = use_mst_decoding_for_validation

        tags = self.vocab.get_token_to_index_vocabulary("pos")
        punctuation_tag_indices = {
            tag: index
            for tag, index in tags.items() if tag in POS_TO_IGNORE
        }
        self._pos_to_ignore = set(punctuation_tag_indices.values())
        logger.info(
            f"Found POS tags corresponding to the following punctuation : {punctuation_tag_indices}. "
            "Ignoring words with these POS tags for evaluation.")

        self._attachment_scores = AttachmentScores()
        initializer(self)

        self.edge_model = edge_model
        self.loss_function = loss_function

        # Being able to detect what state we are in; probably not the best idea.
        self.current_epoch = 1
        self.pass_over_data_just_started = True
Example #13
    def __init__(
            self,
            vocab: Vocabulary,
            option_encoder: Seq2SeqEncoder,
            input_dropout: float = 0.3,
            initializer: InitializerApplicator = InitializerApplicator(),
    ):
        super(LSTMBatchNormBUANonTagGlobalFullNoFinalImage,
              self).__init__(vocab)
        self.rnn_input_dropout = TimeDistributed(
            InputVariationalDropout(
                input_dropout)) if input_dropout > 0 else None

        self.obj_downsample = torch.nn.Sequential(
            torch.nn.Dropout(p=0.1),
            torch.nn.Linear(2048, 512),
            torch.nn.ReLU(inplace=True),
        )
        self.image_BN = BatchNorm1d(512)

        self.option_encoder = TimeDistributed(option_encoder)
        self.option_BN = torch.nn.Sequential(BatchNorm1d(512))
        self.query_BN = torch.nn.Sequential(BatchNorm1d(512))
        self.final_mlp = torch.nn.Sequential(
            torch.nn.Linear(1024, 512),
            torch.nn.ReLU(inplace=True),
        )
        self.final_BN = torch.nn.Sequential(BatchNorm1d(512))
        self.final_mlp_linear = torch.nn.Sequential(torch.nn.Linear(512, 1))
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        initializer(self)
Example #14
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2VecEncoder,
                 decoders: Dict[str, Model],
                 tasks: List[str],
                 task_types: List[str],
                 dropout: float = None,
                 **kwargs) -> None:
        super().__init__(vocab, **kwargs)
        self._text_field_embedder = text_field_embedder

        self.encoder = encoder
        self._classifier_input_dim = self.encoder.get_output_dim()

        if dropout:
            self._dropout = InputVariationalDropout(dropout)
            self._dropout_sents = torch.nn.Dropout(dropout)
        else:
            self._dropout = None
            self._dropout_sents = None

        self.decoders = torch.nn.ModuleDict(decoders)

        self.tasks = tasks
        self.task_types = task_types

        self.counter = 0
        self.metrics = {}
Example #15
    def __init__(self, config: BaseDecoderConfig):
        assert isinstance(config, BaseDecoderConfig)
        super().__init__()
        self.config = config
        self.weight = nn.Parameter(torch.zeros(config.num_encoder_layers))
        self.mapping: nn.Module = nn.Identity()
        self.dropout = InputVariationalDropout(config.input_dropout)
Example #16
    def __init__(
        self,
        similarity_function: SimilarityFunction,
        response_projection_feedforward: FeedForward,
        response_inference_encoder: Seq2SeqEncoder,
        response_input_feedforward: Optional[FeedForward] = None,
        source_input_feedforward: Optional[FeedForward] = None,
        source_projection_feedforward: Optional[FeedForward] = None,
        source_inference_encoder: Optional[Seq2SeqEncoder] = None,
        dropout: float = 0.5,
        # whether to only consider the response and alignments from the source to the response
        response_only: bool = False,
    ) -> None:

        super().__init__()

        self._response_input_feedforward = response_input_feedforward
        self._response_projection_feedforward = response_projection_feedforward
        self._response_inference_encoder = response_inference_encoder

        self._source_input_feedforward = source_input_feedforward or response_input_feedforward
        self._source_projection_feedforward = source_projection_feedforward or response_projection_feedforward
        self._source_inference_encoder = source_inference_encoder or response_inference_encoder

        self._matrix_attention = LegacyMatrixAttention(similarity_function)

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None

        self._response_only = response_only
Example #17
    def __init__(self,
                 vocab: Vocabulary,
                 embedder: TextFieldEmbedder,
                 encoder: Seq2VecEncoder,
                 feedforward: Optional[FeedForward] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 dropout: float = 0.0,
                 label_name: str = 'target-sentiment-labels') -> None:
        super().__init__(vocab, regularizer)
        '''
        :param vocab: A Vocabulary, required in order to compute sizes
                      for input/output projections.
        :param embedder: Used to embed the text.
        :param encoder: Encodes the sentence/text, e.g. an LSTM.
        :param feedforward: An optional feed forward layer to apply after the
                            encoder.
        :param initializer: Used to initialize the model parameters.
        :param regularizer: If provided, will be used to calculate the
                            regularization penalty during training.
        :param dropout: Dropout to apply after each layer apart from the last
                        layer. Dropout applied to time-based data (sequences
                        of vectors) is `variational dropout`_; all other
                        dropout is standard dropout.
        :param label_name: Name of the label namespace.

        This is based on the LSTM model by
        `Tang et al. 2016 <https://www.aclweb.org/anthology/C16-1311.pdf>`_.
        '''
        self.label_name = label_name
        self.embedder = embedder
        self.encoder = encoder
        self.num_classes = self.vocab.get_vocab_size(self.label_name)
        self.feedforward = feedforward

        if feedforward is not None:
            output_dim = self.feedforward.get_output_dim()
        else:
            output_dim = self.encoder.get_output_dim()
        self.label_projection = Linear(output_dim, self.num_classes)

        self.metrics = {"accuracy": CategoricalAccuracy()}
        self.f1_metrics = {}
        # F1 Scores
        label_index_name = self.vocab.get_index_to_token_vocabulary(
            self.label_name)
        for label_index, _label_name in label_index_name.items():
            _label_name = f'F1_{_label_name.capitalize()}'
            self.f1_metrics[_label_name] = F1Measure(label_index)
        self._variational_dropout = InputVariationalDropout(dropout)
        self._naive_dropout = Dropout(dropout)
        check_dimensions_match(embedder.get_output_dim(),
                               encoder.get_input_dim(), 'Embedding', 'Encoder')
        if self.feedforward is not None:
            check_dimensions_match(encoder.get_output_dim(),
                                   feedforward.get_input_dim(), 'Encoder',
                                   'FeedForward')
        initializer(self)
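The docstring above distinguishes variational dropout for time-based data from standard dropout for single vectors. A minimal sketch of that split, using plain torch plus InputVariationalDropout with made-up dimensions (batch 8, 20 tokens, 300-dim embeddings, 3 labels), might look like this:

import torch
from allennlp.modules import InputVariationalDropout

variational_dropout = InputVariationalDropout(0.5)   # for sequences of word vectors
naive_dropout = torch.nn.Dropout(0.5)                # for plain vectors
label_projection = torch.nn.Linear(300, 3)           # stand-in for the label projection

embedded_text = torch.randn(8, 20, 300)              # (batch, tokens, embedding_dim)
embedded_text = variational_dropout(embedded_text)   # one mask shared across all tokens
pooled = embedded_text.mean(dim=1)                   # stand-in for the Seq2VecEncoder
logits = label_projection(naive_dropout(pooled))     # element-wise dropout on a vector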
Example #18
    def __init__(self, vocab: Vocabulary,
                 encoder_dim: int,
                 label_dim: int,
                 edge_dim: int,
                 dropout: float,
                 tag_feedforward: FeedForward = None,
                 arc_feedforward: FeedForward = None) -> None:
        """
        Parameters
        ----------
        vocab : ``Vocabulary``, required
            A Vocabulary, required in order to compute sizes for input/output projections.
        encoder_dim : ``int``, required.
            The output dimension of the encoder.
        label_dim : ``int``, required.
            The dimension of the MLPs used for dependency tag prediction.
        edge_dim : ``int``, required.
            The dimension of the MLPs used for head arc prediction.
        tag_feedforward : ``FeedForward``, optional, (default = None).
            The feedforward network used to produce tag representations.
            By default, a 1 layer feedforward network with an elu activation is used.
        arc_feedforward : ``FeedForward``, optional, (default = None).
            The feedforward network used to produce arc representations.
            By default, a 1 layer feedforward network with an elu activation is used.
        dropout : ``float``, required.
            The variational dropout applied to the output of the encoder and MLP layers.
        """
        super(DMEdges, self).__init__(vocab)
        self._encoder_dim = encoder_dim

        self.head_arc_feedforward = arc_feedforward or \
                                    FeedForward(encoder_dim, 1,
                                                edge_dim,
                                                Activation.by_name("elu")())
        self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)

        self.arc_attention = BilinearMatrixAttention(edge_dim,
                                                     edge_dim,
                                                     use_input_biases=True)

        num_labels = vocab.get_vocab_size("head_tags") #= edge labels

        self.head_tag_feedforward = tag_feedforward or \
                                    FeedForward(encoder_dim, 1,
                                                label_dim,
                                                Activation.by_name("elu")())
        self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)

        self.tag_bilinear = torch.nn.modules.Bilinear(label_dim,
                                                      label_dim,
                                                      num_labels)

        self._dropout = InputVariationalDropout(dropout)

        check_dimensions_match(label_dim, self.head_tag_feedforward.get_output_dim(),
                               "tag representation dim", "tag feedforward output dim")
        check_dimensions_match(edge_dim, self.head_arc_feedforward.get_output_dim(),
                               "arc representation dim", "arc feedforward output dim")
Example #19
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 tag_representation_dim: int,
                 arc_representation_dim: int,
                 pos_tag_embedding: Embedding = None,
                 use_mst_decoding_for_validation: bool = True,
                 dropout: float = 0.0,
                 input_dropout: float = 0.0,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(BiaffineDependencyParser, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.encoder = encoder

        encoder_dim = encoder.get_output_dim()
        self.head_arc_projection = torch.nn.Linear(encoder_dim,
                                                   arc_representation_dim)
        self.child_arc_projection = torch.nn.Linear(encoder_dim,
                                                    arc_representation_dim)
        self.arc_attention = BilinearMatrixAttention(arc_representation_dim,
                                                     arc_representation_dim,
                                                     use_input_biases=True)

        num_labels = self.vocab.get_vocab_size("head_tags")
        self.head_tag_projection = torch.nn.Linear(encoder_dim,
                                                   tag_representation_dim)
        self.child_tag_projection = torch.nn.Linear(encoder_dim,
                                                    tag_representation_dim)
        self.tag_bilinear = torch.nn.modules.Bilinear(tag_representation_dim,
                                                      tag_representation_dim,
                                                      num_labels)

        self._pos_tag_embedding = pos_tag_embedding or None
        self._dropout = InputVariationalDropout(dropout)
        self._input_dropout = Dropout(input_dropout)
        representation_dim = text_field_embedder.get_output_dim()
        if pos_tag_embedding is not None:
            representation_dim += pos_tag_embedding.get_output_dim()
        check_dimensions_match(representation_dim, encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")

        self.use_mst_decoding_for_validation = use_mst_decoding_for_validation

        tags = self.vocab.get_token_to_index_vocabulary("pos")
        punctuation_tag_indices = {
            tag: index
            for tag, index in tags.items() if tag in POS_TO_IGNORE
        }
        self._pos_to_ignore = set(punctuation_tag_indices.values())
        logger.info(
            f"Found POS tags correspoding to the following punctuation : {punctuation_tag_indices}. "
            "Ignoring words with these POS tags for evaluation.")

        self._attachment_scores = AttachmentScores()
        initializer(self)
Example #20
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 text_encoder: Seq2SeqEncoder,
                 classifier_feedforward: Optional[FeedForward] = None,
                 dropout: Optional[float] = 0.0,
                 code_switching_regularizer: Optional[float] = 0.0,
                 bivalency_regularizer: Optional[float] = 0.0,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        '''
        :param dropout: The amount of dropout to apply. Dropout is applied
                        after each non-linear layer and the word embedding
                        lookup. Two types of dropout are applied: variational
                        dropout if the input to the dropout is a sequence of
                        vectors (each vector in the sequence representing a
                        word), and normal dropout if the input is a single
                        vector.
        :param code_switching_regularizer: The weight given to the code
                                           switching lexicon regularisation;
                                           the lower the weight, the less
                                           effect it has. This requires the
                                           dataset reader to supply the code
                                           switching arrays for this class's
                                           forward function. If set, a good
                                           value is 0.001.
        :param bivalency_regularizer: The weight given to the bivalency
                                      regularisation; the lower the weight,
                                      the less effect it has. This requires
                                      the dataset reader to supply the
                                      bivalency arrays for this class's
                                      forward function.
        '''
        super().__init__(vocab, regularizer)
        self._naive_dropout = Dropout(dropout)
        self._variational_dropout = InputVariationalDropout(dropout)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.text_encoder = text_encoder
        text_encoder_dim = text_encoder.get_output_dim()
        # Attention parameters
        self.project_encoded_text = TimeDistributed(
            Linear(text_encoder_dim, text_encoder_dim))
        self.attention_vector = Parameter(torch.Tensor(text_encoder_dim))
        self.reset_parameters()
        self.attention_layer = DotProductAttention(normalize=True)

        self.classifier_feedforward = classifier_feedforward
        output_dim = text_encoder_dim
        if classifier_feedforward:
            output_dim = classifier_feedforward.get_output_dim()
        self.label_projection = Linear(output_dim, self.num_classes)
        self.metrics = {"accuracy": CategoricalAccuracy()}
        self.code_switching_regularizer = code_switching_regularizer
        self.bivalency_regularizer = bivalency_regularizer
        self.loss = torch.nn.CrossEntropyLoss()
        initializer(self)
Example #21
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 share_encoder: _EncoderBase,
                 private_encoder: _EncoderBase,
                 domain_embeddings: Embedding,
                 s_domain_discriminator: Discriminator,
                 p_domain_discriminator: Discriminator,
                 valid_discriminator: Discriminator,
                 dropout: float = 0.0,
                 input_dropout: float = 0.0,
                 label_smoothing: float = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SentimentClassifier, self).__init__(vocab, regularizer)

        if isinstance(share_encoder, Seq2VecEncoder) and isinstance(
                private_encoder, Seq2VecEncoder):
            self._encoder = CNNEncoder(vocab,
                                       text_field_embedder,
                                       share_encoder,
                                       private_encoder,
                                       with_domain_embedding=True,
                                       domain_embeddings=domain_embeddings,
                                       input_dropout=input_dropout)
        else:
            self._encoder = RNNEncoder(vocab,
                                       text_field_embedder,
                                       share_encoder,
                                       private_encoder,
                                       with_domain_embedding=True,
                                       domain_embeddings=domain_embeddings,
                                       input_dropout=input_dropout)

        self._num_classes = self.vocab.get_vocab_size("label")
        self._sentiment_discriminator = Discriminator(
            self._encoder.get_output_dim(), self._num_classes)
        self._s_domain_discriminator = s_domain_discriminator
        self._p_domain_discriminator = p_domain_discriminator
        self._valid_discriminator = valid_discriminator
        self._dropout = InputVariationalDropout(dropout)
        self._input_dropout = Dropout(input_dropout)
        self._label_smoothing = label_smoothing

        self.metrics = {
            "sentiment_acc": CategoricalAccuracy(),
            "p_domain_acc": CategoricalAccuracy(),
            "s_domain_acc": CategoricalAccuracy(),
            "valid_acc": CategoricalAccuracy()
        }

        self._loss = torch.nn.CrossEntropyLoss()
        self._domain_loss = torch.nn.CrossEntropyLoss()
        # TODO torch.nn.BCELoss
        self._valid_loss = torch.nn.BCEWithLogitsLoss()

        initializer(self)
Example #22
    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 similarity_function: SimilarityFunction,
                 projection_feedforward: FeedForward,
                 inference_encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 output_logit: FeedForward,
                 parser_model_path: str,
                 parser_cuda_device: int,
                 freeze_parser: bool,
                 dropout: float = 0.5,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder
        self._encoder = encoder

        self._matrix_attention = LegacyMatrixAttention(similarity_function)
        self._projection_feedforward = projection_feedforward

        self._inference_encoder = inference_encoder

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None

        self._output_feedforward = output_feedforward
        self._output_logit = output_logit

        self._num_labels = vocab.get_vocab_size(namespace="labels")

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")
        check_dimensions_match(encoder.get_output_dim() * 4, projection_feedforward.get_input_dim(),
                               "encoder output dim", "projection feedforward input")
        check_dimensions_match(projection_feedforward.get_output_dim(), inference_encoder.get_input_dim(),
                               "proj feedforward output dim", "inference lstm input dim")

        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()

        self._parser = load_archive(parser_model_path,
                                    cuda_device=parser_cuda_device).model
        self._parser._head_sentinel.requires_grad = False
        for child in self._parser.children():
            for param in child.parameters():
                param.requires_grad = False
        if not freeze_parser:
            for param in self._parser.encoder.parameters():
                param.requires_grad = True

        initializer(self)
Example #23
    def __init__(self, encoder: Seq2SeqEncoder, dropout: float = 0.5) -> None:
        super().__init__()
        self._encoder = encoder
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None
Example #24
    def __init__(self,
                 vocab: Vocabulary,
                 span_encoder: Seq2SeqEncoder,
                 reasoning_encoder: Seq2SeqEncoder,
                 input_dropout: float = 0.3,
                 hidden_dim_maxpool: int = 1024,
                 class_embs: bool=True,
                 reasoning_use_obj: bool=True,
                 reasoning_use_answer: bool=True,
                 reasoning_use_question: bool=True,
                 pool_reasoning: bool = True,
                 pool_answer: bool = True,
                 pool_question: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 ):
        super(AttentionQA, self).__init__(vocab)

        self.detector = SimpleDetector(pretrained=True, average_pool=True, semantic=class_embs, final_dim=512)
        ###################################################################################################

        self.rnn_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None

        self.span_encoder = TimeDistributed(span_encoder)
        self.reasoning_encoder = TimeDistributed(reasoning_encoder)

        self.span_attention = BilinearMatrixAttention(
            matrix_1_dim=span_encoder.get_output_dim(),
            matrix_2_dim=span_encoder.get_output_dim(),
        )

        self.obj_attention = BilinearMatrixAttention(
            matrix_1_dim=span_encoder.get_output_dim(),
            matrix_2_dim=self.detector.final_dim,
        )

        self.reasoning_use_obj = reasoning_use_obj
        self.reasoning_use_answer = reasoning_use_answer
        self.reasoning_use_question = reasoning_use_question
        self.pool_reasoning = pool_reasoning
        self.pool_answer = pool_answer
        self.pool_question = pool_question
        dim = sum([d for d, to_pool in [(reasoning_encoder.get_output_dim(), self.pool_reasoning),
                                        (span_encoder.get_output_dim(), self.pool_answer),
                                        (span_encoder.get_output_dim(), self.pool_question)] if to_pool])

        self.final_mlp = torch.nn.Sequential(
            torch.nn.Dropout(input_dropout, inplace=False),
            torch.nn.Linear(dim, hidden_dim_maxpool),
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(input_dropout, inplace=False),
            torch.nn.Linear(hidden_dim_maxpool, 1),
        )
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        initializer(self)
Example #25
    def __init__(self, vocab: Vocabulary,
                 name:str,
                 edge_model: EdgeModel,
                 loss_function: EdgeLoss,
                 supertagger: Supertagger,
                 lexlabeltagger: Supertagger,
                 supertagger_loss : SupertaggingLoss,
                 lexlabel_loss : SupertaggingLoss,
                 output_null_lex_label : bool = True,
                 loss_mixing : Dict[str,float] = None,
                 dropout : float = 0.0,
                 validation_evaluator: Optional[Evaluator] = None,
                 regularizer: Optional[RegularizerApplicator] = None):

        super().__init__(vocab,regularizer)
        self.name = name
        self.edge_model = edge_model
        self.supertagger = supertagger
        self.lexlabeltagger = lexlabeltagger
        self.supertagger_loss = supertagger_loss
        self.lexlabel_loss = lexlabel_loss
        self.loss_function = loss_function
        self.loss_mixing = loss_mixing or dict()
        self.validation_evaluator = validation_evaluator
        self.output_null_lex_label = output_null_lex_label

        self._dropout = InputVariationalDropout(dropout)

        loss_names = ["edge_existence", "edge_label", "supertagging", "lexlabel"]
        for loss_name in loss_names:
            if loss_name not in self.loss_mixing:
                self.loss_mixing[loss_name] = 1.0
                logger.info(f"Loss name {loss_name} not found in loss_mixing, using a weight of 1.0")
            else:
                if self.loss_mixing[loss_name] is None:
                    if loss_name not in ["supertagging", "lexlabel"]:
                        raise ConfigurationError("Only the loss mixing coefficients for supertagging and lexlabel may be None, but not "+loss_name)

        not_contained = set(self.loss_mixing.keys()) - set(loss_names)
        if len(not_contained):
            logger.critical(f"The following loss name(s) are unknown: {not_contained}")
            raise ValueError(f"The following loss name(s) are unknown: {not_contained}")

        self._supertagging_acc = CategoricalAccuracy()
        self._top_6supertagging_acc = CategoricalAccuracy(top_k=6)
        self._lexlabel_acc = CategoricalAccuracy()
        self._attachment_scores = AttachmentScores()
        self.current_epoch = 0

        tags = self.vocab.get_token_to_index_vocabulary("pos")
        punctuation_tag_indices = {tag: index for tag, index in tags.items() if tag in POS_TO_IGNORE}
        self._pos_to_ignore = set(punctuation_tag_indices.values())
        logger.info(f"Found POS tags corresponding to the following punctuation : {punctuation_tag_indices}. "
                    "Ignoring words with these POS tags for evaluation.")
Example #26
    def __init__(self,
                 vocab: Vocabulary,
                 response_embedder: TextFieldEmbedder,
                 response_word_attention: Seq2VecEncoder,
                 response_encoder: Seq2SeqEncoder,
                 response_sentence_attention: Seq2VecEncoder,
                 output_feedforward: FeedForward,
                 op_embedder: Optional[TextFieldEmbedder] = None,
                 op_word_attention: Optional[Seq2VecEncoder] = None,
                 op_encoder: Optional[Seq2SeqEncoder] = None,
                 dropout: float = 0.5,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 feature_feedforward: FeedForward = None) -> None:

        super().__init__(vocab, regularizer)

        self._response_embedder = response_embedder
        self._response_word_attention = response_word_attention
        self._response_encoder = response_encoder
        self._response_sentence_attention = response_sentence_attention

        self._op_embedder = op_embedder or response_embedder
        self._op_word_attention = op_word_attention or response_word_attention
        self._op_encoder = op_encoder or response_encoder

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None

        self._output_feedforward = output_feedforward
        self._feature_feedforward = feature_feedforward

        self._num_labels = vocab.get_vocab_size(namespace="labels")

        #check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
        #                       "text field embedding dim", "encoder input dim")
        #check_dimensions_match(encoder.get_output_dim() * 4, projection_feedforward.get_input_dim(),
        #                       "encoder output dim", "projection feedforward input")
        #check_dimensions_match(projection_feedforward.get_output_dim(), inference_encoder.get_input_dim(),
        #                       "proj feedforward output dim", "inference lstm input dim")

        self._accuracy = BooleanAccuracy()
        self._fscore = F1Measure(positive_label=1)

        self._fake_accuracy = BooleanAccuracy()
        self._fake_fscore = F1Measure(positive_label=1)

        self._loss = torch.nn.functional.binary_cross_entropy_with_logits

        initializer(self)
Example #27
    def __init__(self,
                 rnn_cell: StackedLstm,
                 source_attention_layer: AttentionLayer,
                 target_attention_layer: AttentionLayer = None,
                 dropout: float = 0.0) -> None:
        super().__init__()
        self.rnn_cell = rnn_cell
        self.source_attention_layer = source_attention_layer
        self.target_attention_layer = target_attention_layer
        self.dropout = InputVariationalDropout(dropout)
        self.use_coverage = source_attention_layer.attention.use_coverage
        self.hidden_vector_dim = self.rnn_cell.hidden_size
Example #28
    def __init__(self, vocab:Vocabulary,
                 encoder_dim:int,
                 label_dim:int,
                 edge_dim:int,
                 dropout: float,
                 edge_label_namespace: str,
                 activation : Activation = None) -> None:
        """
            Parameters
            ----------
            vocab : ``Vocabulary``, required
                A Vocabulary, required in order to compute sizes for input/output projections.
            encoder_dim : ``int``, required.
                The output dimension of the encoder.
            label_dim : ``int``, required.
                The dimension of the hidden layer of the MLP used for predicting the edge labels.
            edge_dim : ``int``, required.
                The dimension of the hidden layer of the MLP used for predicting edge existence.
            edge_label_namespace: str,
                The namespace of the edge labels: a combination of the task name + _head_tags
            activation : ``Activation``, optional, (default = tanh).
                The activation function used in the MLPs.
            dropout : ``float``, required.
                The variational dropout applied to the output of the encoder and MLP layers.
        """
        super(KGEdges, self).__init__(vocab)
        self._encoder_dim = encoder_dim
        if activation is None:
            self.activation = Activation.by_name("tanh")()
        else:
            self.activation = activation

        #edge existence:

        #these two matrices together form the feed forward network which takes the vectors of the two words in question and makes predictions from that
        #this is the trick described by Kiperwasser and Goldberg to make training faster.
        self.head_arc_feedforward = torch.nn.Linear(encoder_dim, edge_dim)
        self.child_arc_feedforward = torch.nn.Linear(encoder_dim, edge_dim, bias=False) #bias is already added by head_arc_feedforward

        self.arc_out_layer = torch.nn.Linear(edge_dim, 1, bias=False)  # K&G don't use a bias for the output layer

        #edge labels:
        num_labels = vocab.get_vocab_size(edge_label_namespace)

        #same trick again
        self.head_label_feedforward = torch.nn.Linear(encoder_dim, label_dim)
        self.child_label_feedforward = torch.nn.Linear(encoder_dim, label_dim, bias=False)

        self.label_out_layer = torch.nn.Linear(edge_dim, num_labels) #output layer for edge labels

        self._dropout = InputVariationalDropout(dropout)
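The comments above refer to the Kiperwasser & Goldberg factorization: project heads and children once through separate linear layers, then score every pair by broadcasting the sum through the activation and the output layer, rather than running an MLP over all O(n^2) token pairs from scratch. A sketch of that trick in plain torch, with assumed shapes (batch 2, 5 tokens, encoder_dim 128, edge_dim 64):

import torch

encoded_text = torch.randn(2, 5, 128)                         # encoder output
head_arc_feedforward = torch.nn.Linear(128, 64)
child_arc_feedforward = torch.nn.Linear(128, 64, bias=False)  # bias covered by the head projection
arc_out_layer = torch.nn.Linear(64, 1, bias=False)

heads = head_arc_feedforward(encoded_text).unsqueeze(2)       # (2, 5, 1, 64)
children = child_arc_feedforward(encoded_text).unsqueeze(1)   # (2, 1, 5, 64)
arc_scores = arc_out_layer(torch.tanh(heads + children)).squeeze(-1)  # (2, 5, 5)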
Example #29
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 dropout: float = 0.0,
                 input_dropout: float = 0.0,
                 label_smoothing: float = 0.1,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SentimentClassifier, self).__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder

        share_rnn = nn.LSTM(input_size=self._text_field_embedder.get_output_dim(),
                            hidden_size=150,
                            batch_first=True,
                            # dropout=dropout,
                            bidirectional=True)
        share_encoder = PytorchSeq2SeqWrapper(share_rnn)

        self._encoder = RNNEncoder(vocab, share_encoder, input_dropout, regularizer)
        self._seq_vec = CnnEncoder(self._encoder.get_output_dim(), 25)
        self._de_dim = len(TASKS_NAME)
        weight = torch.empty(self._de_dim, self._text_field_embedder.get_output_dim())
        torch.nn.init.orthogonal_(weight)
        self._domain_embeddings = Embedding(self._de_dim, self._text_field_embedder.get_output_dim(), weight=weight)
        self._de_attention = BilinearAttention(self._seq_vec.get_output_dim(),
                                               self._domain_embeddings.get_output_dim())
        self._de_feedforward = FeedForward(self._domain_embeddings.get_output_dim(), 1,
                                           self._seq_vec.get_output_dim(), Activation.by_name("elu")())

        self._num_classes = self.vocab.get_vocab_size("label")
        self._sentiment_discriminator = Discriminator(self._seq_vec.get_output_dim(), self._num_classes)
        self._s_domain_discriminator = Discriminator(self._seq_vec.get_output_dim(), len(TASKS_NAME))
        self._valid_discriminator = Discriminator(self._domain_embeddings.get_output_dim(), 2)
        self._dropout = InputVariationalDropout(dropout)
        self._input_dropout = Dropout(input_dropout)
        self._label_smoothing = label_smoothing

        self.metrics = {
            "s_domain_acc": CategoricalAccuracy(),
            "valid_acc": CategoricalAccuracy()
        }
        for task_name in TASKS_NAME:
            self.metrics["{}_stm_acc".format(task_name)] = CategoricalAccuracy()

        self._loss = torch.nn.CrossEntropyLoss()
        self._domain_loss = torch.nn.CrossEntropyLoss()
        # TODO torch.nn.BCELoss
        self._valid_loss = torch.nn.BCEWithLogitsLoss()

        initializer(self)
Example #30
    def __init__(self,
                 vocab: Vocabulary,
                 source_text_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 decoder: SeqDecoder,
                 noisy_prediction: NoisyPredictionModel,
                 dropout: float = 0.2,
                 tied_source_embedder_key: Optional[str] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:

        super(SalienceSeq2Seq, self).__init__(vocab, regularizer)

        self._source_text_embedder = source_text_embedder
        self._encoder = encoder
        self._decoder = decoder
        self._dropout = InputVariationalDropout(dropout)
        self._noisy_prediction = noisy_prediction

        if self._encoder.get_output_dim() != self._decoder.get_output_dim():
            raise ConfigurationError(
                f"Encoder output dimension {self._encoder.get_output_dim()} should be"
                f" equal to decoder dimension {self._decoder.get_output_dim()}."
            )
        if tied_source_embedder_key:
            # A bit of an ugly hack to tie embeddings.
            # Works only for `BasicTextFieldEmbedder`, and since
            # it can have multiple embedders, and `SeqDecoder` contains only a single embedder, we need
            # the key to select the source embedder to replace it with the target embedder from the decoder.
            if not isinstance(self._source_text_embedder,
                              BasicTextFieldEmbedder):
                raise ConfigurationError(
                    "Unable to tie embeddings,"
                    "Source text embedder is not an instance of `BasicTextFieldEmbedder`."
                )
            # pylint: disable=protected-access
            source_embedder = self._source_text_embedder._token_embedders[
                tied_source_embedder_key]
            if not isinstance(source_embedder, Embedding):
                raise ConfigurationError(
                    "Unable to tie embeddings,"
                    "Selected source embedder is not an instance of `Embedding`."
                )
            if source_embedder.get_output_dim(
            ) != self._decoder.target_embedder.get_output_dim():
                raise ConfigurationError(
                    f"Output Dimensions mismatch between"
                    f"source embedder and target embedder.")
            self._source_text_embedder._token_embedders[
                tied_source_embedder_key] = self._decoder.target_embedder
        initializer(self)