Example #1
 def __init__(self, config):
     super(BertModel, self).__init__(config)
     self.embeddings = BertEmbeddings(config)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     print("BertModel init bert weights")
     self.apply(self.init_bert_weights)
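For context, the pooler constructed here is applied at the end of BertModel.forward in pytorch-pretrained-bert. A simplified sketch of that forward pass (condensed from the library, not part of this example):

 def forward(self, input_ids, token_type_ids=None, attention_mask=None):
     if attention_mask is None:
         attention_mask = torch.ones_like(input_ids)
     if token_type_ids is None:
         token_type_ids = torch.zeros_like(input_ids)
     # Broadcastable additive mask: 0 for real tokens, -10000 for padding.
     extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2).float()
     extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
     embedding_output = self.embeddings(input_ids, token_type_ids)
     encoded_layers = self.encoder(embedding_output, extended_attention_mask)
     sequence_output = encoded_layers[-1]
     pooled_output = self.pooler(sequence_output)  # Tanh(Linear(h_[CLS]))
     return encoded_layers, pooled_output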
Example #2
 def __init__(self, config, gen_attention_mask):
     super(BertWithCustomAttentionMask, self).__init__(config)
     self.embeddings = BertEmbeddings(config)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
     self.gen_attention_mask = gen_attention_mask
Example #3
 def __init__(self, configML, num_classes=1):
     super(BERTForMultiLabelSequenceClassification, self).__init__()
     self.num_classes = num_classes
     self.bert = BertModel.from_pretrained(configML.bert_path)
     self.pooler = BertPooler(configML)
     for param in self.bert.parameters():
         param.requires_grad = True
     self.fc = nn.Linear(configML.hidden_size, self.num_classes)
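The constructor above only defines the layers; a minimal forward pass for this multi-label head might look like the following sketch (hypothetical, assuming the pytorch-pretrained-bert BertModel API that returns (encoded_layers, pooled_output)). Because the labels are independent, training would pair these logits with nn.BCEWithLogitsLoss rather than softmax cross-entropy:

 def forward(self, input_ids, token_type_ids=None, attention_mask=None):
     # encoded_layers is the final hidden state when output_all_encoded_layers=False
     encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                   output_all_encoded_layers=False)
     pooled = self.pooler(encoded_layers)  # [CLS] pooling via BertPooler
     return self.fc(pooled)                # one raw logit per label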
Example #4
    def __init__(self, config):
        super(BertModel_Quant, self).__init__(config)
        self.embeddings = BertEmbeddings_Quant(config)
        config_dict = config.__dict__
        if config_dict.get('freeze_embedding') is True:
            for module in self.embeddings.modules():
                for param in module.parameters():
                    param.requires_grad = False
        self.encoder = BertEncoder_Quant(config)

        # re init every QuantLinear layer with its config and corresponding bits.
        if 'layer_bits' in config_dict:
            layer_bits = config_dict['layer_bits']
            layer_requires_grad = config_dict.get('layer_requires_grad', None)
            for module_path, module in self.encoder.named_modules():
                classname = module.__class__.__name__
                if classname == "QuantLinear" and module_path in layer_bits:
                    number_bits = layer_bits[module_path]
                    module.reset_bits(number_bits)
                    # logger.info(
                    #     f'Reset module_path: {module_path}, number_bits: {number_bits}'
                    # )
                    if layer_requires_grad is not None:
                        for param in module.parameters():
                            param.requires_grad = layer_requires_grad[module_path]

            # activation quantization module
            if config_dict.get('quantize_activation'):
                logger.info("activation will be quantized.")
                self.encoder = insert_quant_act_modules(self.encoder)

            if config_dict.get('emb_bits', None):
                logger.info("embeddings' bits will be reset.")
                emb_bits = config_dict['emb_bits']
                for module_path, module in self.embeddings.named_modules():
                    classname = module.__class__.__name__
                    if classname == "QuantEmbedding" and module_path in emb_bits:
                        number_bits = emb_bits[module_path]
                        module.reset_bits(number_bits)
                        logger.info(
                            f'Reset module_path: {module_path}, number_bits: {number_bits}'
                        )

        # Test mode feature.
        # TODO: Make these config parser into functions.
        if 'eval-blending-alpha' in config_dict:
            for module in self.encoder.modules():
                classname = module.__class__.__name__
                if classname == "QuantLinear":
                    module.reset_alpha(config_dict['eval-blending-alpha'])

        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)
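The constructor above is driven entirely by optional keys on the config object. A hypothetical fragment exercising each branch (the module paths are illustrative placeholders, not taken from the source) might look like:

# Hypothetical quantization settings; keys mirror what the constructor reads.
quant_config = {
    'freeze_embedding': True,  # freeze all embedding parameters
    'layer_bits': {'layer.0.attention.self.query': 8},  # bits per QuantLinear
    'layer_requires_grad': {'layer.0.attention.self.query': False},
    'quantize_activation': True,  # wrap encoder with activation quantizers
    'emb_bits': {'word_embeddings': 8},  # bits per QuantEmbedding
    'eval-blending-alpha': 0.5,  # test-mode blending factor
}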
Example #5
 def __init__(self, config):
     """
     :param config: a BertConfig class instance with the configuration to build a new model
     :type config: BertConfig
     """
     super(BertModel, self).__init__(config)
     self.embeddings = BertEmbeddings(config)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
Example #6
 def __init__(self, config):
     super(BertModelModified, self).__init__(config)
     self.embeddings = BertEmbeddingsModified(config)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
     self.embeddings.history_embeddings.weight[0].data.zero_()  # self.embeddings.history_embeddings.padding_idx
     self.embeddings.turn_embeddings.weight[0].data.zero_()  # self.embeddings.turn_embeddings.padding_idx
Example #7
    def __init__(self, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)

        self.pooler = BertPooler(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)
Example #8
 def __init__(self, config, num_labels):
     super(BertEarlyExit, self).__init__(config)
     self.config = config
     self.num_labels = num_labels
     self.bert = BertModel(config)
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.classifier = nn.Linear(config.hidden_size, num_labels)
     self.pooler = BertPooler(config)
     self.scale_weight_1 = nn.Linear(1, 1)
     self.scale_weight_2 = nn.Linear(1, 1)
     self.apply(self.init_bert_weights)
Example #9
 def __init__(self,
              bert_cfg: BertConfig,
              cfg: Optional[Dict[str, Any]] = None):
     self.bert_cfg = bert_cfg
     super().__init__(bert_cfg)
     self.cfg = cfg
     self.embeddings = BertEmbeddings(bert_cfg)
     self.encoder = BertEncoder(bert_cfg)
     self.pooler = BertPooler(bert_cfg)
     self.num_choices = 4
     self.classifier = nn.Linear(bert_cfg.hidden_size, 1)
     self.apply(self.init_bert_weights)
Example #10
    def __init__(self, bert, opt):
        super(LCF_BERT, self).__init__()

        self.bert_spc = bert
        self.opt = opt
        self.bert_local = copy.deepcopy(bert)
        self.dropout = nn.Dropout(opt.dropout)
        self.bert_SA = SelfAttention(bert.config, opt)
        self.mean_pooling_double = nn.Linear(opt.bert_dim * 2, opt.bert_dim)
        self.mean_pooling_single = nn.Linear(opt.bert_dim, opt.bert_dim)
        self.bert_pooler = BertPooler(bert.config)
        self.dense = nn.Linear(opt.bert_dim, opt.polarities_dim)
Example #11
    def __init__(self, bert, opt):
        super(LCF_BERT, self).__init__()

        self.bert_spc = bert
        self.opt = opt
        # self.bert_local = copy.deepcopy(bert)  # Uncomment the line to use dual Bert
        self.bert_local = bert  # Default to use single Bert and reduce memory requirements
        self.dropout = nn.Dropout(opt.dropout)
        self.bert_SA = SelfAttention(bert.config, opt)
        self.linear_double = nn.Linear(opt.bert_dim * 2, opt.bert_dim)
        self.linear_single = nn.Linear(opt.bert_dim, opt.bert_dim)
        self.bert_pooler = BertPooler(bert.config)
        self.dense = nn.Linear(opt.bert_dim, opt.polarities_dim)
Example #12
 def __init__(self, config, img_dim, num_region_toks):
     BertPreTrainedModel.__init__(self, config)
     self.embeddings = BertTextEmbeddings(config)
     self.img_embeddings = BertImageEmbeddings(config, img_dim)
     self.num_region_toks = num_region_toks
     self.region_token_embeddings = nn.Embedding(
         num_region_toks,
         config.hidden_size)
     self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
Example #13
    def __init__(self, config, gcn_adj_dim, gcn_adj_num, gcn_embedding_dim, num_labels, output_attentions=False, keep_multihead_output=False):
        super(VGCN_Bert, self).__init__(config, output_attentions, keep_multihead_output)
        self.embeddings = VGCNBertEmbeddings(config, gcn_adj_dim, gcn_adj_num, gcn_embedding_dim)
        self.encoder = BertEncoder(config, output_attentions=output_attentions,
                                   keep_multihead_output=keep_multihead_output)
        self.pooler = BertPooler(config)
        self.num_labels = num_labels
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.will_collect_cls_states = False
        self.all_cls_states = []
        self.output_attentions = output_attentions

        self.apply(self.init_bert_weights)
Example #14
def test_BertPooler():
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000,
                        hidden_size=768,
                        num_hidden_layers=12,
                        num_attention_heads=12,
                        intermediate_size=3072)
    embeddings = BertEmbeddings(config)
    model = BertPooler(config)

    embedding_output = embeddings(input_ids, token_type_ids)
    print(model(embedding_output))
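For reference, BertPooler itself is a small module: it "pools" the sequence by taking the hidden state of the first ([CLS]) token and passing it through a Linear layer with a Tanh activation. In pytorch-pretrained-bert it looks roughly like this:

from torch import nn

class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Take the hidden state of the first token as the sequence summary.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output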
Example #15
 def __init__(self, config, num_choices=2):
     super(BertForMultipleChoiceWithMatch, self).__init__(config)
     self.num_choices = num_choices
     self.bert = BertModel(config)
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.classifier = nn.Linear(config.hidden_size, 1)
     self.classifier2 = nn.Linear(2 * config.hidden_size, 1)
     self.classifier3 = nn.Linear(3 * config.hidden_size, 1)
     self.classifier4 = nn.Linear(4 * config.hidden_size, 1)
     self.classifier6 = nn.Linear(6 * config.hidden_size, 1)
     self.ssmatch = SSingleMatchNet(config)
     self.pooler = BertPooler(config)
     self.fuse = FuseNet(config)
     self.apply(self.init_bert_weights)
Example #16
    def __init__(self, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)

        self.pooler = BertPooler(config)

        self.evidence_pooler_p = EvidencePooler(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(2 * config.hidden_size, num_labels)

        torch.nn.init.xavier_uniform_(self.classifier.weight)

        self.apply(self.init_bert_weights)
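The classifier above takes 2 * config.hidden_size features, which suggests the two pooled vectors are concatenated downstream. A hypothetical sketch of that step (the EvidencePooler call signature is assumed, not shown in the source):

        pooled = self.pooler(sequence_output)
        evidence = self.evidence_pooler_p(sequence_output)  # assumed signature
        logits = self.classifier(self.dropout(torch.cat([pooled, evidence], dim=-1)))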
Example #17
    def __init__(self, config, num_labels=2):
        """

        :param config:
        :param num_labels:
        :param max_offset:
        :param offset_emb: size of pos embedding, 0 to disable
        """
        print('model_mask')

        super(BertMaskForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                                       do_lower_case=True)
Example #18
 def __init__(self, config, label_size):
     super(BertModel, self).__init__(config, label_size)
     self.embeddings = BertEmbeddings(config, label_size)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
Example #19
 def __init__(self, config):
     super().__init__(config)
     self.embedder = ContinuousBertEmbeddings(config)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
Example #20
 def __init__(self, config):
     super(BertModel_custom, self).__init__(config)
     self.embeddings = BertEmbeddings_custom(config)
     self.encoder = BertEncoder(config)
     self.pooler = BertPooler(config)
     self.apply(self.init_bert_weights)
Example #21
    def __init__(
        self,
        config,
        input_dim,
        output_dim,
        ent_emb_file,
        static_ent_emb_file,
        type_ent_emb_file,
        rel_ent_emb_file,
        tanh=False,
        norm=False,
        freeze=True,
    ):
        super(EntBertEncoder, self).__init__(config)
        if (
            ent_emb_file is not None
            or static_ent_emb_file is not None
            or type_ent_emb_file is not None
            or rel_ent_emb_file is not None
        ):
            self.encoder = BertEncoder(config)
        else:
            self.encoder = None
        self.pooler = BertPooler(config)

        self.apply(self.init_bert_weights)

        if ent_emb_file is not None:
            ent_emb_matrix = torch.from_numpy(np.load(ent_emb_file))
            self.ent_embeddings = nn.Embedding(
                ent_emb_matrix.size()[0], ent_emb_matrix.size()[1], padding_idx=0
            )
            self.ent_embeddings.weight.data.copy_(ent_emb_matrix)
            input_dim += ent_emb_matrix.size()[1]
            if freeze:
                for param in self.ent_embeddings.parameters():
                    param.requires_grad = False
        else:
            self.ent_embeddings = None

        if static_ent_emb_file is not None:
            static_ent_emb_matrix = torch.from_numpy(np.load(static_ent_emb_file))
            self.static_ent_embeddings = nn.Embedding(
                static_ent_emb_matrix.size()[0],
                static_ent_emb_matrix.size()[1],
                padding_idx=0,
            )
            self.static_ent_embeddings.weight.data.copy_(static_ent_emb_matrix)
            input_dim += static_ent_emb_matrix.size()[1]
            if freeze:
                for param in self.static_ent_embeddings.parameters():
                    param.requires_grad = False
        else:
            self.static_ent_embeddings = None

        if type_ent_emb_file is not None:
            type_ent_emb_matrix = torch.from_numpy(np.load(type_ent_emb_file))
            self.type_ent_embeddings = nn.Embedding(
                type_ent_emb_matrix.size()[0],
                type_ent_emb_matrix.size()[1],
                padding_idx=0,
            )
            self.type_ent_embeddings.weight.data.copy_(type_ent_emb_matrix)
            input_dim += type_ent_emb_matrix.size()[1]
            if freeze:
                for param in self.type_ent_embeddings.parameters():
                    param.requires_grad = False
        else:
            self.type_ent_embeddings = None

        if rel_ent_emb_file is not None:
            rel_ent_emb_matrix = torch.from_numpy(np.load(rel_ent_emb_file))
            self.rel_ent_embeddings = nn.Embedding(
                rel_ent_emb_matrix.size()[0],
                rel_ent_emb_matrix.size()[1],
                padding_idx=0,
            )
            self.rel_ent_embeddings.weight.data.copy_(rel_ent_emb_matrix)
            input_dim += rel_ent_emb_matrix.size()[1]
            if freeze:
                for param in self.rel_ent_embeddings.parameters():
                    param.requires_grad = False
        else:
            self.rel_ent_embeddings = None

        self.proj = nn.Linear(input_dim, output_dim)

        if tanh:
            self.proj_activation = nn.Tanh()
        else:
            self.proj_activation = None

        self.norm = norm
        if self.norm:
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
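A hypothetical instantiation of EntBertEncoder (the .npy path and dimensions are illustrative placeholders, not from the source): only the embedding files you pass are loaded, the rest stay None, and the projection's input width grows by each loaded embedding's size.

# Hypothetical usage sketch; file name and dims are placeholders.
model = EntBertEncoder(
    config,
    input_dim=config.hidden_size,
    output_dim=config.hidden_size,
    ent_emb_file='ent_emb.npy',
    static_ent_emb_file=None,
    type_ent_emb_file=None,
    rel_ent_emb_file=None,
    tanh=True,
    norm=False,
    freeze=True,
)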