Example #1
    def __init__(
        self,
        vocab: Vocabulary,
        model_name: Union[str, AutoModel],
        embedding_dropout: float = 0.0,
        initializer: InitializerApplicator = InitializerApplicator(),
        label_smoothing: Optional[float] = None,
        ignore_span_metric: bool = False,
        srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
        restrict_frames: bool = False,
        restrict_roles: bool = False,
        **kwargs,
    ) -> None:
        # bypass SrlBert constructor
        Model.__init__(self, vocab, **kwargs)
        self.lemma_frame_dict = load_lemma_frame(LEMMA_FRAME_PATH)
        self.frame_role_dict = load_role_frame(FRAME_ROLE_PATH)
        self.restrict_frames = restrict_frames
        self.restrict_roles = restrict_roles

        if isinstance(model_name, str):
            self.transformer = AutoModel.from_pretrained(model_name)
        else:
            self.transformer = model_name
        # loss functions; index 0 (the most frequent role label, typically "O")
        # is excluded from the role loss
        self.role_criterion = nn.CrossEntropyLoss(ignore_index=0)
        self.frame_criterion = nn.CrossEntropyLoss()
        # number of classes
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.frame_num_classes = self.vocab.get_vocab_size("frames_labels")
        # metrics; the role F1 excludes the "O" tag so it reflects actual arguments
        role_set = self.vocab.get_token_to_index_vocabulary("labels")
        role_set_filter = [v for k, v in role_set.items() if k != "O"]
        self.f1_role_metric = FBetaMeasure(average="micro", labels=role_set_filter)
        self.f1_frame_metric = FBetaMeasure(average="micro")
        # output layer
        self.tag_projection_layer = nn.Linear(self.transformer.config.hidden_size, self.num_classes)
        self.frame_projection_layer = nn.Linear(
            self.transformer.config.hidden_size, self.frame_num_classes
        )
        self.embedding_dropout = nn.Dropout(p=embedding_dropout)
        self._label_smoothing = label_smoothing
        initializer(self)
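
A note on the two vocabulary namespaces used above: the constructor sizes its classifiers from the `labels` (role tags) and `frames_labels` namespaces of an AllenNLP `Vocabulary`. Below is a minimal sketch of how such namespaces can be populated; the tag and frame strings are invented for illustration:

    from allennlp.data import Vocabulary

    vocab = Vocabulary()
    # "labels" and "frames_labels" match AllenNLP's non-padded "*labels" pattern,
    # so the first token added ("O" here) gets index 0, consistent with the
    # ignore_index=0 used for the role loss above
    vocab.add_tokens_to_namespace(["O", "B-ARG0", "I-ARG0", "B-V"], namespace="labels")
    vocab.add_tokens_to_namespace(["EAT_BITE", "MOTION"], namespace="frames_labels")
    print(vocab.get_vocab_size("labels"))  # 4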
Example #2
    def __init__(
        self,
        vocab: Vocabulary,
        bert_model: Union[str, AutoModel],
        embedding_dropout: float = 0.0,
        initializer: InitializerApplicator = InitializerApplicator(),
        label_smoothing: Optional[float] = None,
        ignore_span_metric: bool = False,
        srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
        restrict_frames: bool = False,
        restrict_roles: bool = False,
        inventory: str = "verbatlas",
        **kwargs,
    ) -> None:
        # bypass SrlBert constructor
        Model.__init__(self, vocab, **kwargs)
        self.lemma_frame_dict = load_lemma_frame(LEMMA_FRAME_PATH)
        self.frame_role_dict = load_role_frame(FRAME_ROLE_PATH)
        self.restrict_frames = restrict_frames
        self.restrict_roles = restrict_roles
        if isinstance(bert_model, str):
            self.transformer = AutoModel.from_pretrained(bert_model)
        else:
            self.transformer = bert_model
        self.frame_criterion = nn.CrossEntropyLoss()
        if inventory == "verbatlas":
            # add frame labels missing from the training vocabulary
            frame_list = load_label_list(FRAME_LIST_PATH)
            self.vocab.add_tokens_to_namespace(frame_list, "frames_labels")
        # number of classes
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.frame_num_classes = self.vocab.get_vocab_size("frames_labels")
        if srl_eval_path is not None:
            # For the span-based evaluation, we don't want to consider labels
            # for the verb, because the verb index is provided to the model.
            self.span_metric = SrlEvalScorer(srl_eval_path, ignore_classes=["V"])
        else:
            self.span_metric = None
        self.f1_frame_metric = FBetaMeasure(average="micro")
        # output layers
        self.tag_projection_layer = nn.Linear(self.transformer.config.hidden_size, self.num_classes)
        self.frame_projection_layer = nn.Linear(
            self.transformer.config.hidden_size, self.frame_num_classes
        )
        self.embedding_dropout = nn.Dropout(p=embedding_dropout)
        self._label_smoothing = label_smoothing
        self.ignore_span_metric = ignore_span_metric
        initializer(self)
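
Both constructors size their projection layers from `self.transformer.config.hidden_size`. A standalone sketch of that pattern (checkpoint name and label count are arbitrary, not taken from the source):

    from transformers import AutoModel
    import torch.nn as nn

    transformer = AutoModel.from_pretrained("bert-base-cased")
    hidden_size = transformer.config.hidden_size  # 768 for BERT-base checkpoints
    tag_projection_layer = nn.Linear(hidden_size, 42)  # 42: arbitrary number of role labels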
Example #3
    def __init__(self,
                 vocab: Vocabulary,
                 response_only_predictor: CMVPredictor,
                 op_response_predictor: CMVPredictor,
                 output_feedforward: FeedForward,
                 dropout: float = 0.5,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:

        Model.__init__(self, vocab, regularizer)

        self._response_only_predictor = response_only_predictor
        self._op_response_predictor = op_response_predictor

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None

        self._output_feedforward = output_feedforward

        self._num_labels = vocab.get_vocab_size(namespace="labels")

        self._accuracy = BooleanAccuracy()
        self._fscore = F1Measure(positive_label=1)

        self._fake_accuracy = BooleanAccuracy()
        self._fake_fscore = F1Measure(positive_label=1)

        self._loss = torch.nn.functional.binary_cross_entropy_with_logits

        initializer(self)
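
The stored loss, `binary_cross_entropy_with_logits`, takes raw scores and float targets of the same shape. A self-contained sketch with random tensors (hypothetical shapes, not the model's actual forward pass):

    import torch
    import torch.nn.functional as F

    logits = torch.randn(8)                      # one unnormalized score per instance
    targets = torch.randint(0, 2, (8,)).float()  # binary labels as floats
    loss = F.binary_cross_entropy_with_logits(logits, targets)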