Example #1
    def __init__(self, config):
        super(BertSentClassifier, self).__init__()
        self.num_labels = config.num_labels
        self.bert = BertModel.from_pretrained('bert-base-uncased')

        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)
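A matching forward pass is not shown in this snippet. Below is a minimal hedged sketch of how these modules would typically be combined, assuming the custom BertModel returns a dict with a 'pooler_output' entry, as in the sanity-check script of Example #7.

    def forward(self, input_ids, attention_mask):
        # Hedged sketch: pooled [CLS] vector -> dropout -> linear head.
        # Assumes self.bert(...) returns a dict with 'pooler_output'
        # (see the sanity-check script in Example #7).
        pooled = self.bert(input_ids, attention_mask)['pooler_output']
        return self.classifier(self.dropout(pooled))  # [batch, num_labels]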
Example #2
    def __init__(self):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert_base/')
        if args.bert_freeze:
            for param in self.bert.parameters():
                param.requires_grad = False
        self.dropout = nn.Dropout(args.bert_dropout)
        self.linear = nn.Linear(args.bert_hidden_size, len(labels), bias=True)
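Freezing the encoder means only the classifier head receives gradient updates. A hedged sketch of the usual follow-up step (the optimizer choice and learning rate are illustrative, not taken from the original code):

import torch

# Hedged sketch: optimize only the parameters left trainable after freezing.
# 'model' is assumed to be an instance of the class above.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable_params, lr=1e-3)  # illustrative settings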
Example #3
    def __init__(self, config, pretrained_weights):
        super(PretrainedBert, self).__init__()
        self.num_labels = config.num_labels
        self.bert = BertModel.from_pretrained('bert-base-uncased')

        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)
        self.classifier.weight = torch.nn.Parameter(pretrained_weights['weights'])
        self.classifier.bias = torch.nn.Parameter(pretrained_weights['bias'])
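Here `pretrained_weights` is expected to be a dict holding a 'weights' tensor of shape [num_labels, hidden_size] and a 'bias' tensor of shape [num_labels]. A hedged sketch of producing such a dict from a previously trained head (the file name and sizes are illustrative assumptions):

import torch

# Hedged sketch: save a classifier head in the format this class expects.
# hidden_size=768 and num_labels=2 are illustrative values.
trained_head = torch.nn.Linear(768, 2)
torch.save({'weights': trained_head.weight.detach().clone(),
            'bias': trained_head.bias.detach().clone()},
           'classifier_head.pt')

pretrained_weights = torch.load('classifier_head.pt')  # passed into __init__ above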
Example #4
    def __init__(self):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert_large/')
        if args.bert_freeze:
            for param in self.bert.parameters():
                param.requires_grad = False

        self.context_dropout = nn.Dropout(args.context_dropout)
        self.mention_dropout = nn.Dropout(args.mention_dropout)

        self.layer_norm = nn.LayerNorm(args.bert_hidden_size)
        self.multi_head_atten = MultiHeadAttention(args.bert_hidden_size,
                                                   num_heads=8,
                                                   dropout=0.1)
        self.mention_char_atten = MultiHeadAttention(args.bert_hidden_size,
                                                     num_heads=8,
                                                     dropout=0.1)

        self.context_lstm = BiLSTM(input_size=args.bert_hidden_size,
                                   hidden_size=args.rnn_hidden_size,
                                   num_layers=args.rnn_num_layers,
                                   dropout=args.rnn_dropout,
                                   num_dirs=args.rnn_num_dirs)

        self.mention_lstm = BiLSTM(input_size=args.bert_hidden_size,
                                   hidden_size=args.rnn_hidden_size,
                                   num_layers=args.rnn_num_layers,
                                   dropout=args.rnn_dropout,
                                   num_dirs=args.rnn_num_dirs)

        self.context_attn_sum = SelfAttentiveSum(args.bert_hidden_size, 100)
        self.mention_attn_sum = SelfAttentiveSum(args.bert_hidden_size, 1)

        self.char_cnn = CharCNN(embedding_num=len(char_vocab),
                                embedding_dim=args.cnn_embedding_dim,
                                filters=eval(args.cnn_filters),
                                output_dim=args.cnn_output_dim)

        self.linear = nn.Linear(in_features=2 * args.bert_hidden_size +
                                args.cnn_output_dim,
                                out_features=len(labels),
                                bias=True)

        if args.interaction:
            self.mention_linear = nn.Linear(in_features=args.bert_hidden_size +
                                            args.cnn_output_dim,
                                            out_features=args.bert_hidden_size,
                                            bias=True)
            self.affinity_matrix = nn.Linear(args.bert_hidden_size,
                                             args.bert_hidden_size)
            self.fusion = Fusion(args.bert_hidden_size)
            self.normalize = Normalize()
            self.fusion_linear = nn.Linear(in_features=2 *
                                           args.bert_hidden_size,
                                           out_features=len(labels),
                                           bias=True)
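The `in_features=2 * args.bert_hidden_size + args.cnn_output_dim` of `self.linear` implies that the final feature vector concatenates a context representation, a mention representation, and the character-CNN output. A hedged sketch of how those sizes line up (tensor names and sizes are illustrative, not taken from the model's forward pass):

import torch

# Hedged sketch: feature sizes feeding self.linear.
# Illustrative sizes: batch=4, bert_hidden_size=1024, cnn_output_dim=50, 10 labels.
context_rep = torch.randn(4, 1024)  # e.g. output of context_attn_sum
mention_rep = torch.randn(4, 1024)  # e.g. output of mention_attn_sum
char_rep = torch.randn(4, 50)       # e.g. output of char_cnn

features = torch.cat([context_rep, mention_rep, char_rep], dim=-1)  # [4, 2*1024 + 50]
logits = torch.nn.Linear(2 * 1024 + 50, 10)(features)               # [4, 10]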
Example #5
    def __init__(self, num_labels, bert_pretrained_path):
        """
        When defining a task model, two parts must always be included: self.train_state and self.device.
        If either is missing, an error is raised when the task model is passed to Training for training.
        In addition, the model must contain two more parts: the network structure and the loss function.
        :param num_labels:
        :param bert_pretrained_path:
        """
        # Initialization
        super().__init__()

        # Build the deep-learning network structure
        self.bert = BertModel.from_pretrained(bert_pretrained_path)
        self.fc = nn.Linear(768, num_labels)
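The docstring asks for both a network structure and a loss function, but only the layers are shown. A hedged sketch of a matching forward pass with a cross-entropy loss (the return convention of self.bert and the loss choice are assumptions, not part of the original code):

    def forward(self, input_ids, attention_mask, labels=None):
        # Hedged sketch: assumes self.bert returns (sequence_output, pooled_output),
        # as the older pytorch-pretrained-bert style BertModel does.
        _, pooled = self.bert(input_ids, attention_mask=attention_mask)
        logits = self.fc(pooled)                                       # [batch, num_labels]
        if labels is not None:
            loss = torch.nn.functional.cross_entropy(logits, labels)  # illustrative loss
            return loss, logits
        return logits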
Example #6
    def __init__(self, config):
        super(BertSentClassifier, self).__init__()
        self.num_labels = config.num_labels
        self.bert = BertModel.from_pretrained('bert-base-uncased')

        # pretrain mode does not require updating bert parameters.
        for param in self.bert.parameters():
            if config.option == 'pretrain':
                param.requires_grad = False
            elif config.option == 'finetune':
                param.requires_grad = True

        # todo
        raise NotImplementedError
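The remaining layers are left as a todo. By analogy with Example #1, a plausible completion, replacing the raise NotImplementedError, would add a dropout and a linear head plus a matching forward pass (a hedged sketch, not the intended solution):

        # Hedged sketch: a possible completion of the todo above,
        # mirroring the classification head from Example #1.
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, input_ids, attention_mask):
        # Pooled [CLS] vector -> dropout -> linear classifier;
        # assumes self.bert returns a dict with 'pooler_output' (see Example #7).
        pooled = self.bert(input_ids, attention_mask)['pooler_output']
        return self.classifier(self.dropout(pooled))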
Example #7
import torch
from bert import BertModel
sanity_data = torch.load("./sanity_check.data")
# text_batch = ["hello world", "hello neural network for NLP"]
# tokenizer here
sent_ids = torch.tensor([[101, 7592, 2088, 102, 0, 0, 0, 0],
                         [101, 7592, 15756, 2897, 2005, 17953, 2361, 102]])
att_mask = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]])

# load our model
bert = BertModel.from_pretrained('bert-base-uncased')
outputs = bert(sent_ids, att_mask)
for k in ['last_hidden_state', 'pooler_output']:
    assert torch.allclose(outputs[k], sanity_data[k], atol=1e-4, rtol=0)
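The hard-coded sent_ids and att_mask correspond to the commented-out text_batch. A hedged sketch of producing them with the HuggingFace tokenizer (the transformers dependency is an assumption; the custom bert module above does not provide a tokenizer):

from transformers import BertTokenizer

# Hedged sketch: tokenize the commented-out text_batch.
# Using transformers' BertTokenizer here is an assumption.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
encoded = tokenizer(["hello world", "hello neural network for NLP"],
                    padding=True, return_tensors='pt')
sent_ids = encoded['input_ids']        # should reproduce the ids above
att_mask = encoded['attention_mask']   # should reproduce the mask above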