Example no. 1
  def __init__(self, config):
    """
    Args:
      config (Config): model configuration (BERT, encoder, and pointer settings).
    """
    super(ModelBaseLineSQC, self).__init__(config)
    # self.config is set by the base-class __init__
    # embedding
    self.bert = load_bert(config.bert_config)
    self.embed_context = Embedding(self.bert,
                                   config.bert_config.max_position_embeddings,
                                   config.bert_config.hidden_size)
    self.embed_question = Embedding(self.bert, config.max_query_length,
                                    config.bert_config.hidden_size)

    # encoder
    self.attention_direction = config.direction
    self.encoder = Encoder(config.encoder_hidden_layer_number,
                           config.bert_config.hidden_size,
                           config.bert_config.max_position_embeddings,
                           config.encoder_intermediate_dim,
                           config.attention_head_num,
                           config.attention_droup_out,  # dropout rate ("droup_out" as spelled in Config)
                           config.attention_use_bias,
                           bi_direction_attention=config.bi_direction_attention,
                           max_query_position=config.max_query_length,
                           attention_direction=config.direction
                           )

    # pointer
    self.pointer_linear = torch.nn.Linear(config.bert_config.hidden_size, 2)
    # 512 here presumably mirrors config.bert_config.max_position_embeddings
    self.query_pointer_linear = torch.nn.Linear(
        config.bert_config.hidden_size,
        int(512 * 2 / config.max_query_length))
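Every attribute the constructor reads is visible above, so a minimal stand-in Config can be sketched for smoke-testing; all concrete values below are assumptions, not taken from the source.

from types import SimpleNamespace

bert_config = SimpleNamespace(max_position_embeddings=512, hidden_size=768)
config = SimpleNamespace(
    bert_config=bert_config,
    max_query_length=64,
    direction="forward",                 # attention direction
    encoder_hidden_layer_number=2,
    encoder_intermediate_dim=3072,
    attention_head_num=12,
    attention_droup_out=0.1,             # attribute name as spelled in the source
    attention_use_bias=True,
    bi_direction_attention=False,
)
# model = ModelBaseLineSQC(config)  # also requires load_bert, Embedding, Encoder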
Example no. 2
  def __init__(self, config):
    """
    Args:
      config (Config): model configuration (BERT and pointer settings).
    """
    super(ModelCLS, self).__init__(config)
    self.bert = load_bert(config.bert_config)
    self.embed_context = Embedding(self.bert,
                                   config.bert_config.max_position_embeddings,
                                   config.bert_config.hidden_size)
    self.embed_question = Embedding(self.bert, config.max_query_length,
                                    config.bert_config.hidden_size)
    self.start_pointer = torch.nn.Linear(self.config.bert_config.hidden_size * 2,
                                         self.config.bert_config.max_position_embeddings)
    self.end_pointer = torch.nn.Linear(self.config.bert_config.hidden_size * 2,
                                       self.config.bert_config.max_position_embeddings)
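The hidden_size * 2 input dimension of both pointer heads suggests they consume a concatenation of a context vector and a question vector. A speculative sketch of that step follows; the function, tensor names, and pooling choice are assumptions, not from the source.

import torch

def pointer_forward_sketch(model, context_vec, question_vec):
    # context_vec, question_vec: (batch, hidden_size) pooled representations (assumed)
    joint = torch.cat([context_vec, question_vec], dim=-1)  # (batch, hidden_size * 2)
    start_logits = model.start_pointer(joint)  # (batch, max_position_embeddings)
    end_logits = model.end_pointer(joint)      # (batch, max_position_embeddings)
    return start_logits, end_logits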
Example no. 3
  def __init__(self, config):
    """
    Args:
      config (Config): model configuration (BERT, encoder, LSTM, and CRF settings).
    """
    super(ModelBaseLineNER, self).__init__(config)
    # self.config is set by the base-class __init__
    # embedding
    self.bert = load_bert(config.bert_config)
    self.embed_word = Embedding(self.bert,
                                config.bert_config.max_position_embeddings,
                                config.bert_config.hidden_size)

    # encoder
    self.encoder = Encoder(config.encoder_hidden_layer_number,
                           config.bert_config.hidden_size,
                           config.bert_config.max_position_embeddings,
                           config.encoder_intermediate_dim,
                           config.attention_head_num,
                           config.attention_droup_out,
                           config.attention_use_bias)

    self.rnn = torch.nn.LSTM(config.bert_config.hidden_size,
                             config.lstm_hidden_size,
                             num_layers=config.lstm_layer_num,
                             batch_first=config.lstm_batch_first,
                             bidirectional=config.lstm_bi_direction)
    self.rnn_linear = torch.nn.Linear(self.config.lstm_hidden_size * (2 if self.config.lstm_bi_direction else 1),
                                      self.config.crf_target_size + 2)
    self.rnn_normal = torch.nn.LayerNorm((self.config.bert_config.max_position_embeddings,
                                          self.config.crf_target_size + 2))

    self.crf = CRF(self.config.crf_target_size, self.config.device == "cuda",
                   self.config.crf_average_batch)

    # pointer
    self.pointer_linear = torch.nn.Linear(config.bert_config.hidden_size, self.config.crf_target_size + 2)
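As a quick sanity check, the NER head above can be shape-tested in isolation; the dimension values below are illustrative assumptions, and the "+ 2" presumably reserves the CRF's start/stop tags.

import torch

batch, seq_len, hidden = 4, 512, 768   # seq_len = max_position_embeddings (assumed)
lstm_hidden, target_size = 256, 9      # illustrative values

rnn = torch.nn.LSTM(hidden, lstm_hidden, num_layers=1,
                    batch_first=True, bidirectional=True)
rnn_linear = torch.nn.Linear(lstm_hidden * 2, target_size + 2)
rnn_normal = torch.nn.LayerNorm((seq_len, target_size + 2))

x = torch.randn(batch, seq_len, hidden)   # stands in for the encoder output
out, _ = rnn(x)                           # (batch, seq_len, lstm_hidden * 2)
emissions = rnn_normal(rnn_linear(out))   # (batch, seq_len, target_size + 2)
print(emissions.shape)                    # torch.Size([4, 512, 11])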