def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if(large):
            self.model = BertModel.from_pretrained('bert-large-uncased', cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir)

        self.finetune = finetune
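In repositories such as PreSumm, where this wrapper appears, it is usually paired with a forward method along these lines; the argument names (x, segs, mask) and the tuple-returning call of the older pytorch_transformers API are assumptions, not part of the snippet above:

    def forward(self, x, segs, mask):
        # Only keep gradients when the encoder is being fine-tuned.
        if self.finetune:
            top_vec, _ = self.model(x, token_type_ids=segs, attention_mask=mask)
        else:
            self.eval()
            with torch.no_grad():
                top_vec, _ = self.model(x, token_type_ids=segs, attention_mask=mask)
        return top_vec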
Example No. 2
    def __init__(self, model_path, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if(large):
            self.model = BertModel.from_pretrained(model_path)
        else:
            self.model = BertModel.from_pretrained(model_path)

        self.finetune = finetune
Example No. 3
    def __init__(self, temp_dir, cased=False, finetune=False):
        super(Bert, self).__init__()
        if (cased):
            self.model = BertModel.from_pretrained('BETO', cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained('BETO', cache_dir=temp_dir)

        self.finetune = finetune
Example No. 4
    def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if (large):
            self.model = BertModel.from_pretrained(BERT_PATH,
                                                   cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained(BERT_PATH,
                                                   cache_dir=temp_dir)

        self.finetune = finetune
Example No. 5
    def __init__(self, device, config, labels=None):
        super().__init__()

        if config.model == "BertCased":
            self.bert = BertModel.from_pretrained('bert-base-cased', output_hidden_states=True)
        else:
            self.bert = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)

        self.fc = nn.Linear(768*12, labels).to(device)
        self.device = device
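The 768*12 input size of the linear head suggests the forward pass concatenates the twelve encoder layers exposed by output_hidden_states=True. A minimal sketch under that assumption (older tuple-returning API; the [CLS] position is used purely for illustration):

    def forward(self, input_ids, attention_mask=None):
        # Older API: (sequence_output, pooled_output, hidden_states).
        _, _, hidden_states = self.bert(input_ids, attention_mask=attention_mask)
        # hidden_states = embedding layer + 12 encoder layers; take the 12 encoder
        # layers at the [CLS] position and concatenate them -> 768 * 12 features.
        cls_layers = [layer[:, 0, :] for layer in hidden_states[1:]]
        features = torch.cat(cls_layers, dim=-1).to(self.device)
        return self.fc(features)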
Example No. 6
    def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if (large):
            self.model = BertModel.from_pretrained('bert-large-uncased',
                                                   cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained(
                "/content/PhoBERT_base_transformers", cache_dir=temp_dir)

        self.finetune = finetune
Example No. 7
    def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if (large):
            self.model = BertModel.from_pretrained('bert-large-uncased',
                                                   cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained(
                '/home/ybai/projects/PreSumm/PreSumm/temp/',
                cache_dir=temp_dir)

        self.finetune = finetune
Example No. 8
    def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if(large):
            # self.model = BertModel.from_pretrained('bert-large-uncased', cache_dir=temp_dir)
            self.model = BertModel.from_pretrained(temp_dir, cache_dir='~/.cache/torch/pytorch_transformers')

        else:
            # self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir)
            self.model = BertModel.from_pretrained(temp_dir, cache_dir='~/.cache/torch/pytorch_transformers')

        self.finetune = finetune
Example No. 9
    def __init__(self, bert_model, temp_dir, finetune=False):
        super(Bert, self).__init__()

        if (bert_model == 'bert-base-multilingual-cased'):
            self.model = BertModel.from_pretrained(
                'bert-base-multilingual-cased', cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained(bert_model,
                                                   cache_dir=temp_dir)
            self.model.resize_token_embeddings(31756)
            self.word_embeddings = nn.Embedding(31756, 768, padding_idx=0)

        self.finetune = finetune
Example No. 10
    def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if (large):
            self.model = BertModel.from_pretrained('bert-large-uncased',
                                                   cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained(
                'bert-base-chinese',
                cache_dir=r'C:\Users\Administrator\PycharmProjects\one\bertsum-chinese-LAI\temp')

        self.finetune = finetune
Example No. 11
    def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()

        if (large):
            #self.model = BertModel.from_pretrained('bert-base-multilingual-cased', cache_dir=temp_dir)
            config = BertConfig.from_json_file('bert-large/config.json')
            self.model = BertModel.from_pretrained('bert-large',
                                                   cache_dir=None,
                                                   config=config)
        else:
            self.model = BertModel.from_pretrained(
                'bert-base-multilingual-cased', cache_dir=temp_dir)

        self.finetune = finetune
Example No. 12
    def __init__(self, device='cpu', language='en', labels=None):
        super().__init__()

        if language == 'en':
            self.bert = BertModel.from_pretrained('bert-base-cased')
        else:
            self.bert = BertModel.from_pretrained('bert-base-chinese')

        self.fc = nn.Linear(1536, 4)
        self.device = device
        self.lstm = nn.LSTM(input_size=768,
                            hidden_size=768,
                            num_layers=1,
                            dropout=0,
                            bidirectional=True)
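The Linear(1536, 4) head matches the concatenated forward and backward outputs of the bidirectional LSTM (2 × 768). A rough forward pass under that assumption (older tuple-returning BERT API):

    def forward(self, input_ids, attention_mask=None):
        sequence_output, _ = self.bert(input_ids, attention_mask=attention_mask)
        # nn.LSTM defaults to batch_first=False, so put the sequence axis first.
        lstm_out, _ = self.lstm(sequence_output.transpose(0, 1))
        # The last time step carries 768 forward + 768 backward features = 1536.
        return self.fc(lstm_out[-1])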
Example No. 13
    def __init__(self, data, args, num_episodes=None):
        self.data = data
        self.args = args
        self.num_episodes = num_episodes

        self.all_classes = np.unique(self.data['label'])
        self.num_classes = len(self.all_classes)
        if self.num_classes < self.args.way:
            raise ValueError("Total number of classes is less than #way.")

        self.idx_list = []
        for y in self.all_classes:
            self.idx_list.append(
                    np.squeeze(np.argwhere(self.data['label'] == y)))

        self.count = 0
        self.done_queue = Queue()

        self.num_cores = cpu_count() if args.n_workers == 0 else args.n_workers

        # print("{}, Initializing parallel data loader with {} processes".format(
        #     datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S'),
        #     self.num_cores), flush=True)

        if self.args.bert_path is not None:
            # use bert online
            print("{}, Loading pretrained bert from {}".format(
                datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S'),
                self.args.bert_path), flush=True)

            if self.args.cuda != -1:
                self.model = BertModel.from_pretrained(
                        'bert-base-uncased',
                        cache_dir=self.args.bert_path).cuda(self.args.cuda)
            else:
                self.model = BertModel.from_pretrained(
                        'bert-base-uncased',
                        cache_dir=self.args.bert_path)

            self.model.eval()

        self.p_list = []
        for i in range(self.num_cores):
            self.p_list.append(
                    Process(target=self.worker, args=(self.done_queue,)))

        for i in range(self.num_cores):
            self.p_list[i].start()
Example No. 14
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name",
                        type=str,
                        required=True,
                        help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir",
                        type=str,
                        default=None,
                        required=False,
                        help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path",
                        type=str,
                        required=True,
                        help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir",
                        type=str,
                        required=True,
                        help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir
    )
    
    convert_pytorch_checkpoint_to_tf(
        model=model,
        ckpt_dir=args.tf_cache_dir,
        model_name=args.model_name
    )
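Because main() accepts raw_args, the converter can be driven programmatically as well as from the command line. The script name and the paths below are placeholders, not values taken from the example:

    # equivalent to:
    #   python convert_to_tf.py --model_name bert-base-uncased \
    #       --pytorch_model_path /path/to/pytorch_model.bin \
    #       --tf_cache_dir /path/to/tf_out
    main([
        "--model_name", "bert-base-uncased",
        "--pytorch_model_path", "/path/to/pytorch_model.bin",
        "--tf_cache_dir", "/path/to/tf_out",
    ])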
Example No. 15
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example No. 16
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            # bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            config = BertConfig.from_pretrained(opt.pretrained_bert_name,
                                                output_attentions=True)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name,
                                             config=config)
            self.pretrained_bert_state_dict = bert.state_dict()
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example No. 17
    def __init__(self, num_labels=2):
        super(BertForSequenceClassification, self).__init__()
        self.num_labels = num_labels
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        # dropout_prob and hidden_size are defined outside this snippet.
        self.dropout = nn.Dropout(dropout_prob)
        self.classifier = nn.Linear(hidden_size, num_labels)
        nn.init.xavier_normal_(self.classifier.weight)
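A typical forward pass for this kind of head (assumed here, not part of the snippet) feeds the pooled [CLS] output through dropout and the classifier; the tuple-returning BERT API is again assumed:

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        # The second element of the tuple is the pooled [CLS] vector.
        _, pooled_output = self.bert(input_ids,
                                     attention_mask=attention_mask,
                                     token_type_ids=token_type_ids)
        return self.classifier(self.dropout(pooled_output))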
Example No. 18
    def __init__(
        self,
        bert_model_or_path: str = "bert-base-uncased",
        layers: str = "-1,-2,-3,-4",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ):
        """
        Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.
        :param bert_model_or_path: name of BERT model ('') or directory path containing a custom model, configuration
        file and vocab file (the three files should be named config.json, pytorch_model.bin/model.chkpt and vocab.txt)
        :param layers: string indicating which layers to take for embedding
        :param pooling_operation: how to get from token piece embeddings to token embedding. Either pool them and take
        the average ('mean') or use the first word piece embedding as token embedding ('first')
        """
        super().__init__()

        self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)
        self.model = BertModel.from_pretrained(
            pretrained_model_name_or_path=bert_model_or_path,
            output_hidden_states=True)
        self.layer_indexes = [int(x) for x in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.name = str(bert_model_or_path)
        self.static_embeddings = True

        self.model.to(flair.device)
        self.model.eval()
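For reference, the selected layers can be recovered from the hidden-states output roughly as follows. This is a standalone sketch with placeholder text and names, not code from the class above:

import torch
from transformers import BertModel, BertTokenizer  # or pytorch_transformers, depending on the repo

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased", output_hidden_states=True)
model.eval()

ids = torch.tensor([tokenizer.encode("a short placeholder sentence")])
with torch.no_grad():
    outputs = model(ids)
hidden_states = outputs[-1]              # embedding layer + one entry per encoder layer
layer_indexes = [-1, -2, -3, -4]         # mirrors the default layers argument
token_embeddings = torch.cat([hidden_states[i] for i in layer_indexes], dim=-1)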
Example No. 19
    def __init__(self, config: BertEncoderConfig = BertEncoderConfig()):
        assert isinstance(config, BertEncoderConfig)
        super().__init__(config)
        self.config = config
        self.model = BertModel.from_pretrained(self.config.mode,
                                               output_hidden_states=True)
        self.freeze_layers()
Example No. 20
    def __init__(self, rnn_type, bert_model_path, bert_embedding_dim,
                 hidden_dim, n_layers, bidirectional, batch_first, dropout,
                 num_classes):
        super(BertRNN, self).__init__()
        self.rnn_type = rnn_type.lower()
        self.bidirectional = bidirectional
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.bert = BertModel.from_pretrained(bert_model_path)
        for param in self.bert.parameters():
            param.requires_grad = True
        if rnn_type == 'lstm':
            self.rnn = nn.LSTM(bert_embedding_dim,
                               hidden_size=hidden_dim,
                               num_layers=n_layers,
                               bidirectional=bidirectional,
                               batch_first=batch_first,
                               dropout=dropout)
        elif rnn_type == 'gru':
            self.rnn = nn.GRU(bert_embedding_dim,
                              hidden_size=hidden_dim,
                              num_layers=n_layers,
                              bidirectional=bidirectional,
                              batch_first=batch_first,
                              dropout=dropout)
        else:
            self.rnn = nn.RNN(bert_embedding_dim,
                              hidden_size=hidden_dim,
                              num_layers=n_layers,
                              bidirectional=bidirectional,
                              batch_first=batch_first,
                              dropout=dropout)

        self.dropout = nn.Dropout(dropout)
        self.fc_rnn = nn.Linear(hidden_dim * 2, num_classes)
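The fc_rnn layer sized hidden_dim * 2 implies that the forward pass joins the last forward and backward hidden states of the recurrent layer. One way that might look, assuming bidirectional=True and batch_first=True were passed, and the older tuple-returning BERT API:

    def forward(self, input_ids, attention_mask=None):
        sequence_output, _ = self.bert(input_ids, attention_mask=attention_mask)
        if self.rnn_type == 'lstm':
            _, (h_n, _) = self.rnn(sequence_output)
        else:
            _, h_n = self.rnn(sequence_output)
        # h_n: (num_layers * num_directions, batch, hidden_dim); concatenate the
        # final states of both directions -> hidden_dim * 2 features per example.
        hidden = torch.cat((h_n[-2], h_n[-1]), dim=-1)
        return self.fc_rnn(self.dropout(hidden))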
Example No. 21
def get_bert(bert_type='bert'):
    tokenizer, model = None, None
    if (bert_type == 'bert'):
        ######## bert ###########

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')

        ########################

    if (bert_type == 'biobert'):
        #### Bio BERT #########

        model = bm.from_pretrained('biobert_v1.1_pubmed')
        tokenizer = BertTokenizer(vocab_file="biobert_v1.1_pubmed/vocab.txt",
                                  do_lower_case=True)

        #### Bio BERT #########

    if (bert_type == 'scibert'):
        #### sci bert #########

        config = AutoConfig.from_pretrained('allenai/scibert_scivocab_uncased',
                                            output_hidden_states=False)
        tokenizer = AutoTokenizer.from_pretrained(
            'allenai/scibert_scivocab_uncased')
        model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased',
                                          config=config)

        #######################

    return tokenizer, model
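A quick usage sketch for the helper above; the sentence is a placeholder, and the tuple-style indexing (outputs[0] as the token-level output) is an assumption about the installed transformers version:

import torch

tokenizer, model = get_bert('bert')
ids = torch.tensor([tokenizer.encode("an example sentence")])
with torch.no_grad():
    outputs = model(ids)
token_vectors = outputs[0]   # (1, seq_len, 768) token-level representations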
Example No. 22
    def __init__(self, opt, model_path: str):
        super(ModelTrained, self).__init__()
        self.opt = opt
        self.model_path = model_path
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)
        self.model.load_state_dict(torch.load(model_path))
Example No. 23
def load_bert_to_cache(cache: Dict):
    bert_model_name = 'bert-base-multilingual-cased'
    if 'bert_tokenizer' not in cache:
        cache['bert_tokenizer'] = BertTokenizer.from_pretrained(
            bert_model_name)
    if 'bert_model' not in cache:
        cache['bert_model'] = BertModel.from_pretrained(bert_model_name)
Example No. 24
    def __init__(self, bert_model_path, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        self.model_name = 'bert-large-uncased' if large else 'bert-base-uncased'
        self.model = BertModel.from_pretrained(path.join(bert_model_path, self.model_name),
                                               cache_dir=temp_dir)
        self.finetune = finetune
Example No. 25
    def __init__(self):
        model_classes = {
            # 'bert_spc': BERT_SPC,
            # 'aen_bert': AEN_BERT,
            'lcf_bert': LCF_BERT
        }

        # set your trained models here
        state_dict_paths = {
            'lcf_bert': 'models/lcf_bert_combined_val_acc0.7563',
            # 'lcf_bert': 'models/lcf_bert_twitter_val_acc0.7283',
            # 'lcf_bert': 'models/lcf_bert_twitter_val_acc0.7225',
            # 'bert_spc': 'state_dict/bert_spc_laptop_val_acc0.268',
            # 'aen_bert': 'state_dict/aen_bert_laptop_val_acc0.2006'
        }

        self.opt = get_parameters()
        self.opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # self.opt.device = torch.device("cpu")

        print("TargetSentimentAnalyzer using device:",self.opt.device)

        self.tokenizer = Tokenizer4Bert(self.opt.max_seq_len, self.opt.pretrained_bert_name)
        self.bert = BertModel.from_pretrained(self.opt.pretrained_bert_name)
        self.model = model_classes[self.opt.model_name](self.bert, self.opt).to(self.opt.device)


        print('loading model {0} ...'.format(self.opt.model_name))
        self.model.load_state_dict(torch.load(state_dict_paths[self.opt.model_name], map_location=torch.device('cpu')))
        # if torch.cuda.is_available():
        # else:
        #     self.model.load_state_dict(torch.load(state_dict_paths[self.opt.model_name]))

        self.model.eval()
        torch.autograd.set_grad_enabled(False)
Example No. 26
    def __init__(self, server):
        self.server = server
        self.dimensions = 768
        transformer_type = 'bert-base-uncased'
        self.tokenizer = BertTokenizer.from_pretrained(transformer_type)
        self.model = BertModel.from_pretrained(transformer_type)
        self.model.eval()
Example No. 27
def main():
    training_dictionaries = load_training_dictionaries()
    bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    bert_model = BertModel.from_pretrained('bert-base-uncased')

    extractor_model = ExtractorModel(bert_tokenizer, bert_model)

    # train_extractor(
    #     extractor_model,
    #     data=training_dictionaries
    # )

    abstractor_model = AbstractorModelRNN(bert_tokenizer, bert_model)

    # train_abstractor(
    #     abstractor_model,
    #     data=training_dictionaries
    # )

    rl_model = RLModel(
        extractor_model,
        abstractor_model,
    )

    train_system(
        rl_model=rl_model,
        data=training_dictionaries
    )
Example No. 28
    def __init__(self, args, temp_dir, finetune=False):

        super(Bert, self).__init__()
        self.model = None  # ensures the check below raises cleanly if no branch matches
        if args.pretrained_model_type in ['bert-base-uncased', 'bert-base-multilingual-uncased']:
            self.model = BertModel.from_pretrained(args.pretrained_model_type, cache_dir=temp_dir)
        if args.pretrained_model_type in ['rubert-deeppavlov']:
            name = args.pretrained_model_type
            config = BertConfig.from_json_file(mapper(name, 'config'))
            self.model = BertModel.from_pretrained(mapper(name, 'model'), config=config)

        if not self.model:
            raise NotImplementedError("self.model")

        bert_data = BertData(args)
        self.model.resize_token_embeddings(len(bert_data.tokenizer))
        self.finetune = finetune
Example No. 29
    def __init__(self, args, dictionary, left_pad=False):
        super().__init__(dictionary)
        self.dropout = args.dropout

        from pytorch_transformers import RobertaModel, BertModel
        from pytorch_transformers.file_utils import PYTORCH_TRANSFORMERS_CACHE
        from pytorch_transformers import RobertaConfig, RobertaTokenizer, BertConfig, BertTokenizer

        if args.pretrained_bert_model.startswith('roberta'):
            self.embed = RobertaModel.from_pretrained(
                args.pretrained_bert_model,
                cache_dir=PYTORCH_TRANSFORMERS_CACHE /
                'distributed_{}'.format(args.distributed_rank))
            # self.context = RobertaModel.from_pretrained(args.pretrained_bert_model,
            #         cache_dir=PYTORCH_TRANSFORMERS_CACHE / 'distributed_{}'.format(args.distributed_rank))
            self.config = RobertaConfig.from_pretrained(
                args.pretrained_bert_model)
            self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

        else:
            self.embed = BertModel.from_pretrained(
                args.pretrained_bert_model,
                cache_dir=PYTORCH_TRANSFORMERS_CACHE /
                'distributed_{}'.format(args.distributed_rank))
            # self.context = BertModel.from_pretrained(args.pretrained_bert_model,
            #         cache_dir=PYTORCH_TRANSFORMERS_CACHE / 'distributed_{}'.format(args.distributed_rank))
            self.config = BertConfig.from_pretrained(
                args.pretrained_bert_model)

            self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        self.padding_idx = self.tokenizer.convert_tokens_to_ids(
            self.tokenizer.pad_token)
Example No. 30
    def __init__(self, opt):
        self.opt = opt
        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test1']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.testset = ABSADataset(opt.dataset_file['test1'], tokenizer)

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))

        model_path = 'state_dict/bert_spc_law_val_acc0.5314.hdf5'  # provide best model path
        self.model.load_state_dict(torch.load(model_path))