Example #1
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropouts = nn.ModuleDict()
        self.classifiers = nn.ModuleDict()
Example #2
    def __init__(self, config):
        super(Bert_for_UNILM, self).__init__(config)

        self.bert = BertModel(config)
        self.classifier = nn.Linear(config.hidden_size, config.vocab_size)
        weight = self.bert.embeddings.word_embeddings.weight
        self.classifier.weight.data = weight.data
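Note the weight tying here copies tensor storage via `.data`, which shares the underlying tensor at construction time. A more robust pattern is to assign the same `nn.Parameter` to both modules so they stay tied through training and serialization. A minimal sketch, assuming the Hugging Face `transformers` API (the class name is illustrative, not from the original source):

    import torch.nn as nn
    from transformers import BertModel

    class TiedLMHead(nn.Module):
        # Illustrative only: ties the vocab projection to the input embeddings.
        def __init__(self, config):
            super().__init__()
            self.bert = BertModel(config)
            self.classifier = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
            # Assign the same Parameter object, so both modules share one tensor
            # and gradients flow into it from both paths.
            self.classifier.weight = self.bert.embeddings.word_embeddings.weight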
Example #3
    def __init__(self, config):

        super(BertForMultiLable, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.apply(self.init_weights)
Example #4
    def __init__(self, config):
        super(BertForMLMwithClassification, self).__init__(config)
        self.bert = BertModel(config)

        self.num_labels = config.num_labels
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()
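For reference, `BertPreTrainingHeads` bundles the masked-LM head with the sequence-level head, so a forward pass for this model would look roughly like the following sketch (argument handling simplified; it assumes `self.bert` returns the sequence output and pooled output first):

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        sequence_output, pooled_output = self.bert(
            input_ids, attention_mask=attention_mask,
            token_type_ids=token_type_ids)[:2]
        # prediction_scores: (batch, seq_len, vocab_size) for the MLM objective;
        # seq_relationship_score: (batch, 2) for the sentence-level objective
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        return prediction_scores, seq_relationship_score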
Example #5
    def __init__(self, config):
        super(BertForReranking, self).__init__(config)

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()
Example #6
 def __init__(self, config, model_configs):
     super(BertBiLSTMCRF, self).__init__(config)
     self.num_labels = config.num_labels
     self.max_seq_length = model_configs['max_seq_length']
     self.bert = BertModel(config)
     self.use_cuda = model_configs['use_cuda'] and torch.cuda.is_available()
     self.crf = CRF(target_size=self.num_labels,
                    use_cuda=self.use_cuda,
                    average_batch=False)
     bert_embedding = config.hidden_size
     # hidden_dim is the output dimension
     # the LSTM's hidden_dim matches the hidden_dim passed to init_hidden
     # and is 1/2 of the output layer's hidden_dim
     self.hidden_dim = config.hidden_size
     self.rnn_layers = model_configs['rnn_layers']
     self.lstm = nn.LSTM(
         input_size=bert_embedding,  # bert embedding
         hidden_size=self.hidden_dim,
         num_layers=self.rnn_layers,
         batch_first=True,
         # dropout = model_configs['train']['dropout_rate'],
         bidirectional=True)
     self.dropout = nn.Dropout(model_configs['dropout_rate'])
     self.hidden2label = nn.Linear(self.hidden_dim * 2, self.num_labels + 2)
     self.apply(self.init_weights)
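The comments above are easier to follow with concrete shapes: the bidirectional LSTM concatenates both directions, doubling the feature dimension, and the `+ 2` in `hidden2label` leaves room for the CRF's start/stop tags. A runnable shape check with illustrative sizes:

    import torch
    import torch.nn as nn

    B, T, H, num_labels = 2, 8, 768, 9          # illustrative sizes
    x = torch.randn(B, T, H)                    # stands in for the BERT sequence output
    lstm = nn.LSTM(H, H, num_layers=1, batch_first=True, bidirectional=True)
    out, _ = lstm(x)                            # (B, T, 2*H): both directions concatenated
    logits = nn.Linear(2 * H, num_labels + 2)(out)  # +2 = CRF start/stop tags
    print(out.shape, logits.shape)              # [2, 8, 1536], [2, 8, 11]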
Example #7
 def __init__(self, config, num_classes, vocab) -> None:
     super(SentenceClassifier, self).__init__(config)
     self.bert = BertModel(config)
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.classifier = nn.Linear(config.hidden_size, num_classes)
     self.vocab = vocab
     self.apply(self.init_weights)
Example #8
 def __init__(self, config):
     super(BertForEmotionClassification, self).__init__(config)
     self.bert = BertModel(config)
     self.num_labels = config.num_labels
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.fc = nn.Linear(config.hidden_size, self.num_labels)
     self.init_weights()
Example #9
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)
Example #10
    def __init__(self, config):

        super(BertForMultiLable, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.hidden_size_1)
        self.classifier_1 = nn.Linear(config.hidden_size_1, config.num_labels)
        self.relu = nn.ReLU()
Example #11
    def __init__(self, config):
        super(BertForPreTrainingMLM, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        self.init_weights()
        self.tie_weights()
Example #12
    def __init__(self, config):
        super(BertDebiasForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.apply(self.init_weights)
        self.config = config
        self.hypothesis_only = self.get_bool_value(config, "hypothesis_only")
        self.gamma_focal = getattr(config, "gamma_focal", 2)
        self.ensemble_training = self.get_bool_value(config, "ensemble_training")
        self.poe_alpha = getattr(config, "poe_alpha", 1)

        # Sets the rubi parameters.
        self.similarity = self.get_list_value(config, "similarity")
        self.rubi = self.get_bool_value(config, 'rubi')
        self.hans = self.get_bool_value(config, 'hans')
        self.hans_features = self.get_bool_value(config, 'hans_features')
        self.focal_loss = self.get_bool_value(config, 'focal_loss')
        self.length_features = self.get_list_value(config, "length_features")
        self.hans_only = self.get_bool_value(config, 'hans_only')
        self.aggregate_ensemble = self.get_str_value(config, 'aggregate_ensemble')
        self.poe_loss = self.get_bool_value(config, 'poe_loss')
        self.weighted_bias_only = self.get_bool_value(config, "weighted_bias_only")

        num_labels_bias_only = self.config.num_labels
        if self.rubi or self.hypothesis_only or self.focal_loss or self.poe_loss or self.hans_only:
            if self.hans:
                num_features = 4 + len(self.similarity)

                if self.hans_features:
                    num_features += len(self.length_features)

                if not config.nonlinear_h_classifier:
                    self.h_classifier1 = nn.Linear(num_features,
                                                   num_labels_bias_only)
                else:
                    self.h_classifier1 = nn.Sequential(
                        nn.Linear(num_features, num_features), nn.Tanh(),
                        nn.Linear(num_features, num_features), nn.Tanh(),
                        nn.Linear(num_features, num_labels_bias_only))

                if self.ensemble_training:
                    self.h_classifier1_second = self.get_classifier(
                        config, config.nonlinear_h_classifier,
                        num_labels_bias_only)
            else:
                # Loads the classifiers from the pretrained model.
                self.h_classifier1 = self.get_classifier(
                    config, config.nonlinear_h_classifier,
                    num_labels_bias_only)

            self.lambda_h = config.lambda_h
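The `get_bool_value`, `get_str_value`, and `get_list_value` helpers are not part of this excerpt; presumably they read optional attributes off the config with a sensible default, along the lines of this hypothetical sketch:

    def get_bool_value(self, config, name, default=False):
        # Hypothetical helper: returns config.<name> when present, else the default.
        return getattr(config, name, default)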
Example #13
    def __init__(self, config):
        super(BertForTokenClassification1hot, self).__init__(config)
        # Each output vector is a yes/no decision, so keep self.num_labels fixed
        # at 2 (rather than config.num_labels) to avoid strange errors later.
        self.num_labels = 2

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)
Example #14
 def __init__(self, config, max_seq_length=128):
     super(BertForNamedEntityRecognition, self).__init__(config)
     self.bert = BertModel(config)
     self.num_labels = config.num_labels
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.hidden_size = config.hidden_size
     self.max_seq_length = max_seq_length
     self.classifier = nn.Linear(config.hidden_size, config.num_labels)
     self.apply(self.init_weights)
Example #15
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.apply(self.init_weights)
        self.loss = None
Example #16
    def __init__(self, config, n_filters=None, filter_sizes=None):
        super(BertCNN, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.convs = Conv1d(config.hidden_size, n_filters, filter_sizes)

        self.classifier = nn.Linear(len(filter_sizes) * n_filters, config.num_labels)
        self.init_weights()
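`Conv1d` here is evidently not `torch.nn.Conv1d` but a project-level wrapper that applies several filter widths in parallel, TextCNN-style; the classifier's `len(filter_sizes) * n_filters` input size implies one max-pooled feature vector per filter size. A hypothetical stand-in consistent with those dimensions:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Conv1d(nn.Module):
        # Hypothetical TextCNN-style block: one conv per filter size, globally max-pooled.
        def __init__(self, in_channels, n_filters, filter_sizes):
            super().__init__()
            self.convs = nn.ModuleList(
                nn.Conv1d(in_channels, n_filters, kernel_size=fs) for fs in filter_sizes)

        def forward(self, x):            # x: (batch, seq_len, hidden)
            x = x.transpose(1, 2)        # convolve over the sequence axis
            pooled = [F.relu(conv(x)).max(dim=2).values for conv in self.convs]
            return torch.cat(pooled, dim=1)  # (batch, len(filter_sizes) * n_filters)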
Example #17
    def __init__(self, bert_model_config: BertConfig):
        super(DocumentBertLinear, self).__init__(bert_model_config)
        self.bert = BertModel(bert_model_config)
        self.bert_batch_size = self.bert.config.bert_batch_size
        self.dropout = nn.Dropout(p=bert_model_config.hidden_dropout_prob)

        self.classifier = nn.Sequential(
            nn.Dropout(p=bert_model_config.hidden_dropout_prob),
            nn.Linear(bert_model_config.hidden_size * self.bert_batch_size,
                      bert_model_config.num_labels), nn.Tanh())
Example #18
 def __init__(self, config, tie_weights):
     super(BertMCQWeightedSum, self).__init__(config)
     self.bert = BertModel(config)
     self._dropout = nn.Dropout(config.hidden_dropout_prob)
     self._classification_layer = nn.Linear(config.hidden_size, 1)
     if tie_weights is True:
         self._weight_layer = self._classification_layer
     else:
         self._weight_layer = nn.Linear(config.hidden_size, 1)
     self.apply(self.init_weights)
Example #19
 def __init__(self, config):
     # Use Multi-layer perceptron as classifier (in comparison to single linear layer above)
     super(BertForEmotionClassificationMultihead, self).__init__(config)
     self.bert = BertModel(config)
     self.hidden_size = config.hidden_size
     self.num_labels = config.num_labels
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.nonlinear = nn.PReLU()
     self.classifier = self.make_classifier([512, 256, 128])
     self.init_weights()
Example #20
    def __init__(self, config):

        super(BertFCForMultiLable, self).__init__(config)
        # bert = BertModel.from_pretrained(bert_model_path)
        self.bert = BertModel(config)
        for param in self.bert.parameters():
            param.requires_grad = True
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.apply(self.init_weights)
Example #21
    def __init__(self, config):
        super(NegationModel, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier_cue = nn.Linear(config.hidden_size, 1)        # was: config.num_labels
        self.classifier_scope = nn.Linear(config.hidden_size + 1, 1)  # was: config.num_labels

        self.init_weights()
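The scope classifier takes `hidden_size + 1` inputs, which suggests the per-token cue logit is concatenated onto the hidden states before scope prediction. A hedged forward sketch of that cascade (assumes `import torch`):

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        sequence_output = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
        sequence_output = self.dropout(sequence_output)
        cue_logits = self.classifier_cue(sequence_output)             # (B, T, 1)
        # Feed the cue decision to the scope head: hidden_size + 1 inputs.
        scope_in = torch.cat([sequence_output, cue_logits], dim=-1)   # (B, T, H+1)
        scope_logits = self.classifier_scope(scope_in)                # (B, T, 1)
        return cue_logits, scope_logits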
Example #22
    def __init__(self, bert_model_config: BertConfig):
        super(BertSimilarityRegressor, self).__init__(bert_model_config)
        self.bert = BertModel(bert_model_config)
        linear_size = bert_model_config.hidden_size

        self.regression = nn.Sequential(
            nn.Dropout(p=bert_model_config.hidden_dropout_prob),
            nn.Linear(linear_size, 1))

        self.apply(self.init_weights)
Example #23
 def __init__(self, config):
     super(BertMCQMAC, self).__init__(config)
     config.output_attentions = True
     self.bert = BertModel(config)
     self._dropout = nn.Dropout(config.hidden_dropout_prob)
     self._classification_layer = nn.Linear(config.hidden_size, 1)
     self._key_components_detection_layer = nn.Linear(
         3 * config.hidden_size, 1)
     self._attention_layer = 13
     self._attention_head = 4
     self.apply(self.init_weights)
Example #24
    def __init__(self, bert_model_config: BertConfig):
        super(DocumentBertLinear, self).__init__(bert_model_config)
        self.bert = BertModel(bert_model_config)
        self.bert_batch_size = self.bert.config.bert_batch_size
        self.dropout = nn.Dropout(p=bert_model_config.hidden_dropout_prob)

        #self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6, norm=nn.LayerNorm(bert_model_config.hidden_size))
        self.classifier = nn.Sequential(
            nn.Dropout(p=bert_model_config.hidden_dropout_prob),
            nn.Linear(bert_model_config.hidden_size * self.bert_batch_size,
                      bert_model_config.num_labels), nn.Tanh())
Example #25
    def __init__(self, config):

        super(BertForMultiLable, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.classifier = GRUNet(1,
                                 hidden_dim=356,
                                 output_dim=config.num_labels,
                                 n_layers=3)
        self.init_weights()
Example #26
    def __init__(self, config, model_configs):
        super(Bert, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.hidden_dim = config.hidden_size
        self.use_cuda = model_configs['use_cuda'] and torch.cuda.is_available()
        self.dropout = nn.Dropout(model_configs['dropout_rate'])
        self.hidden2label = nn.Linear(self.hidden_dim, self.num_labels)
        self.loss_function = CrossEntropyLoss()
        self.max_seq_length = model_configs['max_seq_length']

        self.apply(self.init_weights)
Example #27
    def __init__(self, bert_model_config: BertConfig):
        super(BertSimilarityRegressor, self).__init__(bert_model_config)
        self.bert = BertModel(bert_model_config)
        linear_size = bert_model_config.hidden_size
        if bert_model_config.pretrained_config_archive_map['additional_features'] is not None:
            linear_size += bert_model_config.pretrained_config_archive_map['additional_features']

        self.regression = nn.Sequential(
            nn.Dropout(p=bert_model_config.hidden_dropout_prob),
            nn.Linear(linear_size, 1)
        )

        self.apply(self.init_weights)
Example #28
    def __init__(self, config, label2id, device, num_layers=2, lstm_dropout=0.35):
        super(BERTLSTMCRF, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, len(label2id))
        self.init_weights()
        self.bilstm = nn.LSTM(input_size=config.hidden_size, hidden_size=config.hidden_size // 2,
                             batch_first=True,
                             num_layers=num_layers,
                             dropout=lstm_dropout,
                             bidirectional=True)

        self.layer_norm = normalization.LayerNorm(config.hidden_size)
        self.crf = crf.CRF(tagset_size=len(label2id), tag_dictionary=label2id, device=device, is_bert=True)
Example #29
    def create_bert_model_from_json_config(bert_json_config, opt):
        if bert_json_config:
            logger.info("bert_json_config: {}".format(bert_json_config))
            bert_config = json.loads(bert_json_config)
            logger.info("loaded bert_json_config: {}".format(bert_config))
        else:
            bert_config = None

        assert bert_config, "bert_config: {}".format(bert_json_config)
        # use a separate bert model
        config = BertConfig.from_pretrained(bert_config['bert_config_name'])
        config.output_attentions = True
        config.output_hidden_states = True
        bert_model = BertModel(config)
        return bert_model
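A usage sketch: the function is called with a JSON string that names a pretrained config (the `opt` argument is unused in this excerpt):

    bert_json_config = '{"bert_config_name": "bert-base-uncased"}'
    bert_model = create_bert_model_from_json_config(bert_json_config, opt=None)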
Example #30
    def __init__(self, config):
        super(BertPointer, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # classifiers
        self.ext_start_classifier = nn.Linear(config.hidden_size, 1, bias=False)
        self.ext_end_classifier = nn.Linear(config.hidden_size, 1, bias=False)
        self.aug_start_classifier = nn.Linear(config.hidden_size, 1, bias=False)
        self.aug_end_classifier = nn.Linear(config.hidden_size, 1, bias=False)

        self.label_classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.apply(self.init_weights)
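Each pointer head maps hidden states to one score per token, so squeezing the last axis yields start/end logits over the sequence. A hedged sketch of the forward pass for the extraction heads and the sequence-level label:

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        sequence_output, pooled_output = self.bert(
            input_ids, attention_mask=attention_mask,
            token_type_ids=token_type_ids)[:2]
        sequence_output = self.dropout(sequence_output)
        ext_start_logits = self.ext_start_classifier(sequence_output).squeeze(-1)  # (B, T)
        ext_end_logits = self.ext_end_classifier(sequence_output).squeeze(-1)      # (B, T)
        label_logits = self.label_classifier(self.dropout(pooled_output))  # (B, num_labels)
        return ext_start_logits, ext_end_logits, label_logits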