Code Example #1
File: seq_ner.py Project: foxlf823/dygiepp
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 context_layer: Seq2SeqEncoder,
                 modules,  # TODO(dwadden) Add type.
                 feature_size: int,
                 max_span_width: int,
                 loss_weights: Dict[str, float],
                 lexical_dropout: float = 0.2,
                 lstm_dropout: float = 0.4,
                 use_attentive_span_extractor: bool = False,
                 co_train: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 display_metrics: Optional[List[str]] = None,
                 span_extractor: Optional[SpanExtractor] = None) -> None:
        super(SeqNER, self).__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder
        self._context_layer = context_layer

        self._loss_weights = loss_weights
        self._permanent_loss_weights = copy.deepcopy(self._loss_weights)

        # Wrap the raw dict in Params so that the from_params calls below can
        # pop their sub-configurations.
        modules = Params(modules)

        self._ner = NERTagger.from_params(vocab=vocab,
                                          feature_size=feature_size,
                                          params=modules.pop("ner"))

        self._seq = SeqLabel.from_params(vocab=vocab,
                                         feature_size=feature_size,
                                         params=modules.pop("seq"))

        self._endpoint_span_extractor = span_extractor

        self._max_span_width = max_span_width

        self._display_metrics = display_metrics

        if lexical_dropout > 0:
            self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
        else:
            self._lexical_dropout = lambda x: x

        if lstm_dropout > 0:
            self._lstm_dropout = torch.nn.Dropout(p=lstm_dropout)
        else:
            self._lstm_dropout = lambda x: x

        initializer(self)
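
A minimal, self-contained sketch of the dropout-or-identity pattern used for lexical_dropout and lstm_dropout above (plain PyTorch; the rates and tensor sizes are made up for illustration). When the rate is zero, an identity lambda keeps the call site uniform; since the lambda is not a registered submodule there is nothing for train()/eval() to toggle, which is harmless because the identity has no randomness.

import torch

def make_dropout(p: float):
    # Real Dropout module when p > 0, otherwise a no-op identity,
    # so callers can apply it unconditionally.
    return torch.nn.Dropout(p=p) if p > 0 else (lambda x: x)

lexical_dropout = make_dropout(0.2)   # mirrors the default above
lstm_dropout = make_dropout(0.0)      # disabled: identity path

x = torch.randn(2, 5, 8)                  # (batch, tokens, emb_dim)
print(lexical_dropout(x).shape)           # torch.Size([2, 5, 8])
print(torch.equal(lstm_dropout(x), x))    # True: unchanged
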
Code Example #2
    def __init__(self,
                 vocab: Vocabulary,
                 embedder: TextFieldEmbedder,
                 modules,  # TODO(dwadden) Add type.
                 feature_size: int,
                 max_span_width: int,
                 target_task: str,
                 feedforward_params: Dict[str, Union[int, float]],
                 loss_weights: Dict[str, float],
                 initializer: InitializerApplicator = InitializerApplicator(),
                 module_initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 display_metrics: Optional[List[str]] = None) -> None:
        super(DyGIE, self).__init__(vocab, regularizer)

        ####################

        # Create span extractor.
        self._endpoint_span_extractor = EndpointSpanExtractor(
            embedder.get_output_dim(),
            combination="x,y",
            num_width_embeddings=max_span_width,
            span_width_embedding_dim=feature_size,
            bucket_widths=False)

        ####################

        # Set parameters.
        self._embedder = embedder
        self._loss_weights = loss_weights
        self._max_span_width = max_span_width
        self._display_metrics = self._get_display_metrics(target_task)
        token_emb_dim = self._embedder.get_output_dim()
        span_emb_dim = self._endpoint_span_extractor.get_output_dim()

        ####################

        # Create submodules.

        modules = Params(modules)

        # Helper function to create feedforward networks.
        def make_feedforward(input_dim):
            return FeedForward(input_dim=input_dim,
                               num_layers=feedforward_params["num_layers"],
                               hidden_dims=feedforward_params["hidden_dims"],
                               activations=torch.nn.ReLU(),
                               dropout=feedforward_params["dropout"])

        # Submodules

        self._ner = NERTagger.from_params(vocab=vocab,
                                          make_feedforward=make_feedforward,
                                          span_emb_dim=span_emb_dim,
                                          feature_size=feature_size,
                                          params=modules.pop("ner"))

        self._coref = CorefResolver.from_params(vocab=vocab,
                                                make_feedforward=make_feedforward,
                                                span_emb_dim=span_emb_dim,
                                                feature_size=feature_size,
                                                params=modules.pop("coref"))

        self._relation = RelationExtractor.from_params(vocab=vocab,
                                                       make_feedforward=make_feedforward,
                                                       span_emb_dim=span_emb_dim,
                                                       feature_size=feature_size,
                                                       params=modules.pop("relation"))

        self._events = EventExtractor.from_params(vocab=vocab,
                                                  make_feedforward=make_feedforward,
                                                  token_emb_dim=token_emb_dim,
                                                  span_emb_dim=span_emb_dim,
                                                  feature_size=feature_size,
                                                  params=modules.pop("events"))

        ####################

        # Apply module_initializer to each submodule, then initializer to the
        # model as a whole.
        for module in [self._ner, self._coref, self._relation, self._events]:
            module_initializer(module)

        initializer(self)
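
The make_feedforward closure above gives each submodule a scoring network with shared hyperparameters but its own input width. A minimal sketch of it in isolation, assuming AllenNLP's FeedForward and hypothetical values standing in for feedforward_params:

import torch
from allennlp.modules import FeedForward

# Hypothetical hyperparameters standing in for feedforward_params.
feedforward_params = {"num_layers": 2, "hidden_dims": 150, "dropout": 0.4}

def make_feedforward(input_dim):
    # Same closure as in the constructor: shared hyperparameters,
    # per-module input dimension.
    return FeedForward(input_dim=input_dim,
                       num_layers=feedforward_params["num_layers"],
                       hidden_dims=feedforward_params["hidden_dims"],
                       activations=torch.nn.ReLU(),
                       dropout=feedforward_params["dropout"])

ff = make_feedforward(input_dim=36)    # e.g. a span embedding width
print(ff.get_output_dim())             # 150
print(ff(torch.randn(4, 36)).shape)    # torch.Size([4, 150])
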
Code Example #3
    def __init__(
            self,
            vocab: Vocabulary,
            text_field_embedder: TextFieldEmbedder,
            context_layer: Seq2SeqEncoder,
            modules,  # TODO(dwadden) Add type.
            feature_size: int,
            max_span_width: int,
            loss_weights: Dict[str, float],
            lexical_dropout: float = 0.2,
            lstm_dropout: float = 0.4,
            use_attentive_span_extractor: bool = False,
            co_train: bool = False,
            initializer: InitializerApplicator = InitializerApplicator(),
            regularizer: Optional[RegularizerApplicator] = None,
            display_metrics: Optional[List[str]] = None) -> None:
        super(DyGIE, self).__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder
        self._context_layer = context_layer

        self._loss_weights = loss_weights
        self._permanent_loss_weights = copy.deepcopy(self._loss_weights)

        # Wrap the raw dict in Params so that the from_params calls below can
        # pop their sub-configurations.
        modules = Params(modules)
        self._coref = CorefResolver.from_params(vocab=vocab,
                                                feature_size=feature_size,
                                                params=modules.pop("coref"))
        self._ner = NERTagger.from_params(vocab=vocab,
                                          feature_size=feature_size,
                                          params=modules.pop("ner"))
        self._relation = RelationExtractor.from_params(
            vocab=vocab,
            feature_size=feature_size,
            params=modules.pop("relation"))
        self._events = EventExtractor.from_params(vocab=vocab,
                                                  feature_size=feature_size,
                                                  params=modules.pop("events"))

        # Make endpoint span extractor.

        self._endpoint_span_extractor = EndpointSpanExtractor(
            context_layer.get_output_dim(),
            combination="x,y",
            num_width_embeddings=max_span_width,
            span_width_embedding_dim=feature_size,
            bucket_widths=False)
        if use_attentive_span_extractor:
            self._attentive_span_extractor = SelfAttentiveSpanExtractor(
                input_dim=text_field_embedder.get_output_dim())
        else:
            self._attentive_span_extractor = None

        self._max_span_width = max_span_width

        self._display_metrics = display_metrics

        if lexical_dropout > 0:
            self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
        else:
            self._lexical_dropout = lambda x: x

        # Do co-training if we're training on ACE and OntoNotes.
        self._co_train = co_train

        # Big gotcha: PyTorch's LSTM applies dropout between layers but not to
        # the final layer's output, so we have to add it manually.
        if lstm_dropout > 0:
            self._lstm_dropout = torch.nn.Dropout(p=lstm_dropout)
        else:
            self._lstm_dropout = lambda x: x

        initializer(self)
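
The EndpointSpanExtractor constructed above concatenates each span's contextualized start and end states (combination="x,y") and appends a learned span-width embedding. A minimal usage sketch, assuming AllenNLP's API and made-up dimensions:

import torch
from allennlp.modules.span_extractors import EndpointSpanExtractor

token_emb_dim, feature_size, max_span_width = 16, 4, 8   # made-up sizes

extractor = EndpointSpanExtractor(
    token_emb_dim,
    combination="x,y",                     # concat start and end states
    num_width_embeddings=max_span_width,   # one learned vector per width
    span_width_embedding_dim=feature_size,
    bucket_widths=False)

tokens = torch.randn(2, 10, token_emb_dim)   # (batch, seq_len, emb_dim)
spans = torch.tensor([[[0, 2], [4, 4]],      # end indices are inclusive
                      [[1, 5], [7, 9]]])
span_embs = extractor(tokens, spans)
# "x,y" contributes 2 * token_emb_dim, plus feature_size for the width:
print(extractor.get_output_dim())            # 36
print(span_embs.shape)                       # torch.Size([2, 2, 36])
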
Code Example #4
File: discontinuous_ner.py Project: foxlf823/dygiepp
    def __init__(
            self,
            vocab: Vocabulary,
            text_field_embedder: TextFieldEmbedder,
            context_layer: Seq2SeqEncoder,
            modules,  # TODO(dwadden) Add type.
            feature_size: int,
            max_span_width: int,
            loss_weights: Dict[str, float],
            use_tree: bool,
            use_syntax: bool,
            use_dep: bool,
            use_tree_feature: bool,
            tree_feature_first: bool,
            tree_feature_usage: str,
            tree_feature_arch: str,
            tree_span_filter: bool = False,
            lexical_dropout: float = 0.2,
            lstm_dropout: float = 0.4,
            use_attentive_span_extractor: bool = False,
            co_train: bool = False,
            initializer: InitializerApplicator = InitializerApplicator(),
            regularizer: Optional[RegularizerApplicator] = None,
            display_metrics: Optional[List[str]] = None,
            span_extractor: Optional[SpanExtractor] = None) -> None:
        super(DisNER, self).__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder
        self._context_layer = context_layer

        self._loss_weights = loss_weights
        self._permanent_loss_weights = copy.deepcopy(self._loss_weights)

        # Wrap the raw dict in Params so that the from_params calls below can
        # pop their sub-configurations.
        modules = Params(modules)

        self._ner = NERTagger.from_params(vocab=vocab,
                                          feature_size=feature_size,
                                          params=modules.pop("ner"))
        self._relation = RelationExtractor.from_params(
            vocab=vocab,
            feature_size=feature_size,
            params=modules.pop("relation"))

        # Use the span extractor injected by the caller as the endpoint extractor.
        self._endpoint_span_extractor = span_extractor

        self._max_span_width = max_span_width

        self._display_metrics = display_metrics

        if lexical_dropout > 0:
            self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
        else:
            self._lexical_dropout = lambda x: x

        # Do co-training if we're training on ACE and OntoNotes.
        self._co_train = co_train

        # Big gotcha: PyTorch's LSTM applies dropout between layers but not to
        # the final layer's output, so we have to add it manually.
        if lstm_dropout > 0:
            self._lstm_dropout = torch.nn.Dropout(p=lstm_dropout)
        else:
            self._lstm_dropout = lambda x: x

        self.use_tree = use_tree
        if self.use_tree:
            self.tree_feature_first = tree_feature_first
            self.use_syntax = use_syntax
            if self.use_syntax:
                self._syntax_embedding = Embedding(
                    vocab.get_vocab_size('span_syntax_labels'), feature_size)
            self._tree = Tree.from_params(vocab=vocab,
                                          feature_size=feature_size,
                                          params=modules.pop("tree"))

            self.tree_span_filter = tree_span_filter
            if self.tree_span_filter:
                self._tree_span_embedding = Embedding(
                    vocab.get_vocab_size('span_tree_labels'), feature_size)

        self.use_dep = use_dep
        if self.use_dep:
            self._dep_tree = TreeDep.from_params(
                vocab=vocab,
                feature_size=feature_size,
                params=modules.pop("dep_tree"))
            self._tree_feature_usage = tree_feature_usage

        self.use_tree_feature = use_tree_feature
        if self.use_tree_feature:
            self._tree_feature_arch = tree_feature_arch
            if self._tree_feature_arch == 'transformer':
                self._tf_layer = MyTransformer.from_params(
                    vocab=vocab, params=modules.pop("tf_transformer"))
            elif self._tree_feature_arch == 'gcn':
                self._tf_layer = TreeFeature.from_params(
                    vocab=vocab, params=modules.pop("tf_layer"))
            elif self._tree_feature_arch == 'mhsa':
                self._tf_layer = TreeFeatureMHSA.from_params(
                    vocab=vocab, params=modules.pop("tf_mhsa"))
            else:
                raise RuntimeError("Unknown tree_feature_arch: {}".format(
                    self._tree_feature_arch))
            self._tree_feature_usage = tree_feature_usage

        initializer(self)
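
All four constructors wrap the incoming modules dict in Params before building submodules: from_params expects configuration it can pop() key by key, and Params.pop returns nested Params objects that downstream from_params calls can consume. A minimal sketch of that behavior, with a hypothetical config in place of the real modules argument:

from allennlp.common import Params

# Hypothetical nested config standing in for the `modules` argument.
modules = Params({"ner": {"hidden_dim": 150},
                  "relation": {"hidden_dim": 100}})

ner_params = modules.pop("ner")        # nested dicts come back as Params
print(type(ner_params).__name__)       # Params
print(ner_params.pop("hidden_dim"))    # 150
print(modules.as_dict())               # {'relation': {'hidden_dim': 100}}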