Example #1
 def __init__(self, opt):
     """
     初始化模型和数据预处理,并token化
     :param opt: argparse的参数
     """
     self.opt = opt
     # If this is a BERT-style model, initialize with BERT; otherwise use GloVe
     if 'bert' in opt.model_name:
         # Initialize the tokenizer
         tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                    opt.pretrained_bert_name,
                                    cache_dir=opt.pretrained_bert_cache_dir)
         # Load the BERT model
         bert = BertModel.from_pretrained(
             opt.pretrained_bert_name,
             cache_dir=opt.pretrained_bert_cache_dir)
         # Then pass the BERT model and opt into the custom model for further processing
         self.model = opt.model_class(bert, opt).to(opt.device)
     else:
         # Build a custom tokenizer that generates id2word and word2idx
         tokenizer = build_tokenizer(
             fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
             max_seq_len=opt.max_seq_len,
             dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
         # Returns the embeddings of all words, shape [word_nums, embedding_dimension]
         embedding_matrix = build_embedding_matrix(
             word2idx=tokenizer.word2idx,
             embed_dim=opt.embed_dim,
             dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                 str(opt.embed_dim), opt.dataset))
         # Load the model
         self.model = opt.model_class(embedding_matrix, opt).to(opt.device)
     # Load the training and test sets
     self.trainset = ABSADataset(opt.dataset_file['train'],
                                 tokenizer,
                                 recreate_caches=opt.recreate_caches)
     self.testset = ABSADataset(opt.dataset_file['test'],
                                tokenizer,
                                recreate_caches=opt.recreate_caches)
     # If valset_ratio is 0, the test set doubles as the validation set
     assert 0 <= opt.valset_ratio < 1
     if opt.valset_ratio > 0:
         valset_len = int(len(self.trainset) * opt.valset_ratio)
         self.trainset, self.valset = random_split(
             self.trainset, (len(self.trainset) - valset_len, valset_len))
     else:
         self.valset = self.testset
     # Check CUDA memory
     if opt.device.type == 'cuda':
         logger.info('cuda memory allocated: {}'.format(
             torch.cuda.memory_allocated(device=opt.device.index)))
     self._print_args()
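
Every constructor in this collection consumes an `opt` namespace produced by argparse. For reference, here is a minimal sketch of the fields Example #1 reads; the field names come straight from the snippet above, while the concrete values are illustrative assumptions, not the project's defaults:

from types import SimpleNamespace

import torch

opt = SimpleNamespace(
    model_name='bert_spc',             # 'bert' in model_name selects the BERT branch
    model_class=None,                  # set to a model class, e.g. BERT_SPC
    max_seq_len=85,
    pretrained_bert_name='bert-base-uncased',
    pretrained_bert_cache_dir='./bert_cache',
    dataset='restaurant',
    dataset_file={'train': 'train.raw', 'test': 'test.raw'},
    embed_dim=300,                     # used only on the GloVe branch
    valset_ratio=0.1,
    recreate_caches=False,
    device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
)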
Example #2
    def __init__(self, opt):
        self.opt = opt

        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)

        self.trainset = IOBDataset(opt.dataset_file['train'], tokenizer)

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #3
    def __init__(self, opt):
        self.opt = opt
        self.tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                        opt.pretrained_bert_name)
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)

        print('loading model {0} ...'.format(opt.model_name))
        self.model.load_state_dict(torch.load(opt.state_dict_path))
        self.model = self.model.to(opt.device)
        # switch model to evaluation mode
        self.model.eval()
        torch.autograd.set_grad_enabled(False)
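
The two calls at the end of Example #3 (model.eval() and torch.autograd.set_grad_enabled(False)) put the model in inference mode and disable autograd globally. An equivalent, more local pattern wraps each forward pass in torch.no_grad(); a minimal sketch, assuming `model` and `inputs` are prepared as in the snippet:

import torch

def predict(model, inputs):
    model.eval()               # disable dropout, use running batch-norm stats
    with torch.no_grad():      # build no autograd graph for this pass
        return model(inputs)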
Example #4
    def __init__(self, opt):
        self.opt = opt
        if 'bert' in opt.model_name:
            self.tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                            opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
            print('loading model {0} ...'.format(opt.model_name))
            # note: map_location='cpu' was removed for use on a GPU server
            self.model.load_state_dict(torch.load(opt.state_dict_path))

        # switch model to evaluation mode
        self.model.eval()
        torch.autograd.set_grad_enabled(False)
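
The map_location comment above matters whenever a checkpoint saved on a GPU machine is loaded on a CPU-only one (or vice versa). A device-agnostic loader, sketched under the assumption that `path` and `device` mirror opt.state_dict_path and opt.device from the snippet:

import torch

def load_checkpoint(model, path, device):
    # torch.load deserializes the tensors directly onto the target device,
    # regardless of where the checkpoint was saved
    state_dict = torch.load(path, map_location=device)
    model.load_state_dict(state_dict)
    return model.to(device)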
Example #5
    def __init__(self):
        opt = module_opt()
        bert = BertModel.from_pretrained('bert-base-uncased')
        self.tokenizer = Tokenizer4Bert(80, 'bert-base-uncased')
        model = LCF_BERT(bert, opt).to(opt.device)

        print('loading sa module ...')
        model.load_state_dict(
            torch.load('state_dict/lcf_bert_movie_val_acc0.8203',
                       map_location=torch.device('cpu')))
        model.eval()
        torch.autograd.set_grad_enabled(False)
        self.model = model
        self.opt = opt
Example #6
    def __init__(self, arguments):
        # Project hyperparameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-e",
                            "--EPOCHS",
                            default=5,
                            type=int,
                            help="train epochs")
        parser.add_argument("-b",
                            "--BATCH",
                            default=2,
                            type=int,
                            help="batch size")
        self.args = parser.parse_args()
        self.arguments = arguments
        self.dataset = Dataset(epochs=self.args.EPOCHS,
                               batch=self.args.BATCH,
                               val_batch=self.args.BATCH)

        if 'bert' in self.arguments.model_name:
            self.tokenizer = Tokenizer4Bert(
                max_seq_len=self.arguments.max_seq_len,
                pretrained_bert_name=os.path.join(
                    os.getcwd(), self.arguments.pretrained_bert_name))
            bert = BertModel.from_pretrained(
                pretrained_model_name_or_path=self.arguments.pretrained_bert_name)
            self.model = self.arguments.model_class(bert, self.arguments).to(
                self.arguments.device)
        else:
            self.tokenizer = Util.bulid_tokenizer(
                fnames=[
                    self.arguments.dataset_file['train'],
                    self.arguments.dataset_file['test']
                ],
                max_seq_len=self.arguments.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(self.arguments.dataset))
            embedding_matrix = Util.build_embedding_matrix(
                word2idx=self.tokenizer.word2idx,
                embed_dim=self.arguments.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(self.arguments.embed_dim), self.arguments.dataset))
            self.model = self.arguments.model_class(
                embedding_matrix, self.arguments).to(self.arguments.device)

        if self.arguments.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(
                    device=self.arguments.device.index)))

        Util.print_args(model=self.model, logger=logger, args=self.arguments)
Example #7
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='temp_data/' +
                '{0}_tokenizer.dat'.format(opt.dataset),
                step=4 if opt.tabsa else 3)
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='temp_data/' + '{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        if opt.tabsa:
            if opt.tabsa_with_absa:
                self.trainset = TABSADataset(opt.dataset_file['train'],
                                             tokenizer, True)
                self.testset = TABSADataset(opt.dataset_file['test'],
                                            tokenizer, True)
            else:
                self.trainset = TABSADataset(opt.dataset_file['train'],
                                             tokenizer, False)
                self.testset = TABSADataset(opt.dataset_file['test'],
                                            tokenizer, False)
        else:
            self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
            self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)

        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #8
def initialize():

    opt = get_parameters()
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
    bert = BertModel.from_pretrained(opt.pretrained_bert_name)
    model = model_classes[opt.model_name](bert, opt).to(opt.device)

    print('loading model {0} ...'.format(opt.model_name))
    model.load_state_dict(torch.load(state_dict_paths[opt.model_name]))
    model.eval()
    torch.autograd.set_grad_enabled(False)

    return opt, tokenizer, model
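
A hypothetical call site for initialize(); the unpacking order matches its return statement:

if __name__ == '__main__':
    opt, tokenizer, model = initialize()
    print('loaded {0} on {1}'.format(opt.model_name, opt.device))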
Example #9
    def __init__(self, opt):
        self.opt = opt
        if 'bert' in opt.model_name:
            self.tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = BERT_SSC(bert, opt).to(opt.device)
            logger.info('loading model {0} ... done'.format(opt.model_name))
            # note: map_location='cpu' was removed for use on a GPU server
            self.model.load_state_dict(torch.load(opt.state_dict_path))

            # switch model to evaluation mode
            self.model.eval()
            torch.autograd.set_grad_enabled(False)
        else:
            logger.info('Currently only BERT-based models are supported')
            raise ValueError('Currently only BERT-based models are supported')
Example #10
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        # else:
        #     tokenizer = build_tokenizer(
        #         fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
        #         max_seq_len=opt.max_seq_len,
        #         dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
        #     embedding_matrix = build_embedding_matrix(
        #         word2idx=tokenizer.word2idx,
        #         embed_dim=opt.embed_dim,
        #         dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(opt.embed_dim), opt.dataset))
        #     self.model = opt.model_class(embedding_matrix, opt).to(opt.device)
        if 'pair' in opt.model_name:
            if not self.opt.do_eval:
                self.trainset = ABSADataset_sentence_pair(
                    opt.dataset_file['train'], tokenizer)
            self.testset = ABSADataset_sentence_pair(opt.dataset_file['test'],
                                                     tokenizer)
        elif 'SA' in opt.model_name:
            if not self.opt.do_eval:
                self.trainset = SADataset(opt.dataset_file['train'], tokenizer)
            self.testset = SADataset(opt.dataset_file['test'], tokenizer)
        else:
            if not self.opt.do_eval:
                self.trainset = ABSADataset(opt.dataset_file['train'],
                                            tokenizer)
            self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if not self.opt.do_eval:
            if opt.valset_ratio > 0:
                valset_len = int(len(self.trainset) * opt.valset_ratio)
                self.trainset, self.valset = random_split(
                    self.trainset,
                    (len(self.trainset) - valset_len, valset_len))
            else:
                self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #11
    def __init__(self, data):
        self.net = None
        self.data = data
        self.args = args
        self.idx2label = dict(
            (i, args.labels[i]) for i in range(len(args.labels)))

        self.tokenizer = Tokenizer4Bert(max_seq_len=self.args.max_seq_len,
                                        pretrained_bert_name=os.path.join(
                                            os.getcwd(),
                                            self.args.pretrained_bert_name))
        bert = BertModel.from_pretrained(
            os.path.join(os.getcwd(), self.args.pretrained_bert_name))
        model = self.args.model_classes[args.model_name](bert, self.args).to(
            self.args.device)

        if self.args.topics is not None:
            # Load one fine-tuned model per topic, kept as attributes
            # net_0 .. net_4 to preserve the original interface
            for i in range(len(self.args.topics)):
                net = Util.load_model(model=model,
                                      output_dir=os.path.join(
                                          os.getcwd(), args.best_model_path,
                                          self.args.topics[i]))
                net.eval()
                setattr(self, 'net_{}'.format(i), net)
        else:
            self.net = Util.load_model(model=model,
                                       output_dir=os.path.join(
                                           os.getcwd(), args.best_model_path))
Example #12
    def __init__(self, arguments):
        # Project hyperparameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-e", "--EPOCHS", default=5, type=int, help="train epochs")
        parser.add_argument("-b", "--BATCH", default=2, type=int, help="batch size")
        self.args = parser.parse_args()
        self.arguments = arguments
        self.dataset = Dataset(epochs=self.args.EPOCHS, batch=self.args.BATCH, val_batch=self.args.BATCH)

        if 'bert' in self.arguments.model_name:
            self.tokenizer = Tokenizer4Bert(max_seq_len=self.arguments.max_seq_len,
                                            pretrained_bert_name=os.path.join(os.getcwd(),
                                                                              self.arguments.pretrained_bert_name))
            bert = BertModel.from_pretrained(pretrained_model_name_or_path=self.arguments.pretrained_bert_name)
            self.model = self.arguments.model_class(bert, self.arguments).to(self.arguments.device)
        else:
            self.tokenizer = Util.bulid_tokenizer(
                fnames=[self.arguments.dataset_file['train'], self.arguments.dataset_file['test']],
                max_seq_len=self.arguments.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(self.arguments.dataset)
            )
            embedding_matrix = Util.build_embedding_matrix(
                word2idx=self.tokenizer.word2idx,
                embed_dim=self.arguments.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.arguments.embed_dim), self.arguments.dataset)
            )
            self.model = self.arguments.model_class(embedding_matrix, self.arguments).to(self.arguments.device)

        if self.arguments.device.type == 'cuda':
            logger.info(
                'cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=self.arguments.device.index)))

        Util.print_args(model=self.model, logger=logger, args=self.arguments)

        target_text, stance, _, _ = self.dataset.get_all_data()
        target = np.asarray([i['TARGET'].lower() for i in target_text])
        text = np.asarray([i['TEXT'].lower() for i in target_text])
        stance = np.asarray([i['STANCE'] for i in stance])
        self.target_set = set()
        for tar in target:
            self.target_set.add(tar)
        text = PreProcessing(text).get_file_text()
        trainset = ABSADataset(data_type=None, fname=(target, text, stance), tokenizer=self.tokenizer)

        valset_len = int(len(trainset) * self.arguments.valset_ratio)
        self.trainset, self.valset = random_split(trainset, (len(trainset) - valset_len, valset_len))
Example #13
    def __init__(self, opt):
        self.opt = opt
        print("loading {0} tokenizer...".format(opt.dataset))
        self.bert_tokenizer = Tokenizer4Bert('bert-base-chinese')

        self.model_list = []
        for i, model_name in enumerate(opt.model_name_list):
            print('loading model {0}... '.format(model_name))
            bert = BertModel.from_pretrained('bert-base-chinese')
            model = nn.DataParallel(opt.model_class_list[i](bert, opt).to(
                opt.device))
            model.load_state_dict(torch.load(opt.state_dict_path_list[i]))
            # switch model to evaluation mode
            model.eval()
            self.model_list.append(model)

        torch.autograd.set_grad_enabled(False)
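
One caveat with the nn.DataParallel wrapping above: DataParallel stores the network under a `module` attribute, so its state_dict keys carry a 'module.' prefix, and the checkpoints loaded here must have been saved from wrapped models as well. A sketch for adapting an unwrapped checkpoint, where `path` is an assumed file location:

import torch

def load_unwrapped_checkpoint(parallel_model, path):
    state = torch.load(path)
    # add the prefix nn.DataParallel expects on every parameter name
    state = {'module.' + k: v for k, v in state.items()}
    parallel_model.load_state_dict(state)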
Example #14
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            # set bert_based_vocab
            tokenizer = Tokenizer4Bert(
                opt.max_seq_len,
                '/data/kkzhang/aaa/command/bert-base-uncased-vocab.txt')
            #tokenizer = Tokenizer4Bert(opt.max_seq_len, '/home/kkzhang/bert-large-uncased/bert-large-uncased-vocab.txt')
            # set bert pre_train model
            bert = BertModel.from_pretrained(
                '/data/kkzhang/WordeEmbedding/bert_base/')

            # multi-GPU support
            if torch.cuda.device_count() > 1:
                logging.info('The device has {} GPUs'.format(
                    torch.cuda.device_count()))
                bert = nn.DataParallel(bert)

            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #15
    def __init__(self, opt):
        self.opt = opt

        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)

        self.trainset = ABSADataset('./data/Train_Data.csv', tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #16
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        # self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        # self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)

        ## using our own dataset
        data = pd.read_csv('train_data1.csv')
        # test_data = pd.read_csv('../test_tOlRoBf.csv')
        train_data, test_data = train_test_split(data,
                                                 test_size=0.1,
                                                 random_state=42)
        self.trainset = ABSADataset(train_data, tokenizer)
        self.testset = ABSADataset(test_data, tokenizer)

        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #17
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = nn.DataParallel(opt.model_class(bert,
                                                         opt)).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)
        print('pretrained model loaded')

        self.trainset = CLFDataset(opt.dataset_file['train'],
                                   tokenizer,
                                   label_dict=opt.label_dict)
        print('trainset build done')
        assert 0 <= opt.valset_ratio < 1

        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
            self.testset = self.valset
        else:
            self.testset = self.trainset
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #18
    def __init__(self, opt):
        self.opt = opt
        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert_path = opt.pretrained_bert_name  # fallback so bert_path is always defined
        if self.opt.bert_path:
            bert_path = self.opt.bert_path.replace("\r", "").replace("\n", "")
        bert = BertModel.from_pretrained(bert_path)
        self.model = opt.model_class(bert, opt).to(opt.device)
        self.trainset = PreTrainDataset(opt.dataset, tokenizer, train_or_test='train')
        np.random.shuffle(self.trainset.data)
        self.testset = PreTrainDataset(opt.dataset, tokenizer, train_or_test='test')
        # np.random.shuffle(self.testset.data)
        if self.opt.cross_val_fold < 0:
            self.valset = PreTrainDataset(opt.dataset, tokenizer, train_or_test='val')
        if self.opt.cross_val_fold == 0:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #19
File: main.py  Project: wwz58/ABSA-PyTorch
    def __init__(self, opt):
        self.opt = opt

        if opt.model_name.lower() in ['vh_bert', 'bert_att', 'my_lcf']:
            tokenizer = BertTokenizer.from_pretrained(opt.pretrained_bert_name)
            config = BertConfig.from_pretrained(opt.pretrained_bert_name,
                                                output_attentions=True)
            self.model = opt.model_class(config).to(opt.device)
        elif 'bert' in opt.model_name.lower():
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            config = BertConfig.from_pretrained(opt.pretrained_bert_name,
                                                output_attentions=True)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name,
                                             config=config)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='./cache/{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='./cache/{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #20
def get_model(models):
    opt_list = []
    pred_list = []
    for model in models:
        opt = main(model)
        opt_list.append(opt)

    tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
    bert = BertModel.from_pretrained(opt.pretrained_bert_name,
                                     output_hidden_states=True)
    testset = ABSADataset(opt.dataset_file['test'], tokenizer)
    for opt in opt_list:
        if (opt.model_name == "bert_spc" or opt.model_name == "lcf_bert"):
            bert1 = BertModel.from_pretrained(opt.pretrained_bert_name)
            pred = Predictor(opt, tokenizer, bert1, testset)
        else:
            pred = Predictor(opt, tokenizer, bert, testset)
        predictions = pred.save_predictions()
        pred_list.append(predictions)
    return pred_list
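
A hypothetical invocation of get_model(); main(model) is assumed to parse per-model options as in the snippet above:

pred_list = get_model(['bert_spc', 'lcf_bert'])  # one prediction set per model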
Example #21
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        if opt.dataset in ['twitter', 'restaurant', 'laptop']:
            self.trainset = ABSADataset(opt.dataset_file['train'],
                                        tokenizer)  # returns a torch Dataset
            self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        else:
            self.trainset = CovData(opt.dataset_file['train'],
                                    tokenizer)  # returns a torch Dataset
            self.testset = CovData(opt.dataset_file['test'], tokenizer)
        # Split the training set according to the validation-set ratio
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #22
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name,
                                             output_hidden_states=True)
            # tokenizer = Tokenizer4Bert(opt.max_seq_len, '/content/drive/My Drive/FYP/pretrained_BERT_further_trained_with_criminal_corpus/vocab.txt')
            # bert = BertModel.from_pretrained('/content/drive/My Drive/FYP/pretrained_BERT_further_trained_with_criminal_corpus')
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(
            opt.dataset_file['train'],
            './datasets/semeval14/law_train.raw.graph', tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'],
                                   './datasets/semeval14/law_train.raw.graph',
                                   tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #23
 def __init__(self, opt):
     self.opt = opt
     if 'bert' in opt.model_name:
         self.tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
         bert = BertModel.from_pretrained(opt.pretrained_bert_name)
         self.model = opt.model_class(bert, opt).to(opt.device)
     else:
         self.tokenizer = build_tokenizer(
             fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
             max_seq_len=opt.max_seq_len,
             dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
         embedding_matrix = build_embedding_matrix(
             word2idx=self.tokenizer.word2idx,
             embed_dim=opt.embed_dim,
             dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(opt.embed_dim), opt.dataset))
         self.model = opt.model_class(embedding_matrix, opt)
     print('loading model {0} ...'.format(opt.model_name))
     self.model.load_state_dict(torch.load(opt.state_dict_path))
     self.model = self.model.to(opt.device)
     # switch model to evaluation mode
     self.model.eval()
     torch.autograd.set_grad_enabled(False)
Example #24
def get_model(models):
    opt_list = []
    models_list = []
    for model in models:
        opt = main(model)
        opt_list.append(opt)

    tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
    bert = BertModel.from_pretrained(opt.pretrained_bert_name,
                                     output_hidden_states=True)

    for opt in opt_list:
        if (opt.model_name == "bert_spc" or opt.model_name == "lcf_bert"):
            bert1 = BertModel.from_pretrained(opt.pretrained_bert_name)
            pred = Preloader(opt, tokenizer, bert1)
            models_list.append(pred.get_model())

        else:
            pred = Preloader(opt, tokenizer, bert)
            models_list.append(pred.get_model())

    return models_list, opt_list, tokenizer
Example #25
    def __init__(self, opt):
        self.opt = opt
        tokenizer = Tokenizer4Bert(opt.max_length, opt.pretrained_bert_name)
        bert_model = BertModel.from_pretrained(opt.pretrained_bert_name,
                                               output_hidden_states=True)
        self.pretrained_bert_state_dict = bert_model.state_dict()
        self.model = opt.model_class(bert_model, opt).to(opt.device)

        print('loading model {0} ...'.format(opt.model_name))

        self.model.load_state_dict(torch.load(opt.state_dict_path))
        self.model = self.model.to(opt.device)

        torch.autograd.set_grad_enabled(False)

        testset = BertSentenceDataset(opt.dataset_file['test'],
                                      tokenizer,
                                      target_dim=self.opt.polarities_dim,
                                      opt=opt)
        self.test_dataloader = DataLoader(dataset=testset,
                                          batch_size=opt.eval_batch_size,
                                          shuffle=False)
Example #26
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            # freeze pretrained bert params
            # for param in bert.parameters():
            #     param.requires_grad = False
            self.model = opt.model_class(bert, opt)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        self.train_data_loader = DataLoader(dataset=trainset,
                                            batch_size=opt.batch_size,
                                            shuffle=True)
        self.test_data_loader = DataLoader(dataset=testset,
                                           batch_size=opt.batch_size,
                                           shuffle=False)

        if opt.device.type == 'cuda':
            self.model = nn.DataParallel(self.model).cuda()
            print("cuda memory allocated:",
                  torch.cuda.memory_allocated(device=opt.device.index))
        else:
            self.model = self.model.to(opt.device)
        self._print_args()
Example #27
    def __init__(self, opt):
        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name + '/vocab.txt')
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                fname=opt.embed_fname,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.train_dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0 and (not opt.val_test):
            print('Splitting the trainset into train and val sets')
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            print('Using the test set as the validation set')
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()
Example #28
    def __init__(self, opt):
        self.opt = opt
        if 'bert' in opt.model_name:
            # opt.learning_rate = 2e-5
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)

            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            # opt.learning_rate = 0.001
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        self.train_data_loader = DataLoader(dataset=trainset,
                                            batch_size=opt.batch_size,
                                            shuffle=True)
        self.test_data_loader = DataLoader(dataset=testset,
                                           batch_size=opt.batch_size,
                                           shuffle=False)

        if opt.device.type == 'cuda':
            logging.info("cuda memory allocated:{}".format(
                torch.cuda.memory_allocated(device=opt.device.index)))

        self._log_write_args()
Example #29
    model_classes = {
        'bert_spc': BERT_SPC,
        'aen_bert': AEN_BERT,
        'lcf_bert': LCF_BERT
    }
    # set your trained models here
    state_dict_paths = {
        'lcf_bert': 'state_dict/lcf_bert_laptop_val_acc0.2492',
        'bert_spc': 'state_dict/bert_spc_laptop_val_acc0.268',
        'aen_bert': 'state_dict/aen_bert_laptop_val_acc0.2006'
    }

    opt = get_parameters()
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
    bert = BertModel.from_pretrained(opt.pretrained_bert_name)
    model = model_classes[opt.model_name](bert, opt).to(opt.device)

    print('loading model {0} ...'.format(opt.model_name))
    model.load_state_dict(torch.load(state_dict_paths[opt.model_name]))
    model.eval()
    torch.autograd.set_grad_enabled(False)

    # input: This little place has a cute interior decor and affordable city prices.
    # text_left = This little place has a cute
    # aspect = interior decor
    # text_right = and affordable city prices.

    text_bert_indices, bert_segments_ids, text_raw_bert_indices, aspect_bert_indices = \
        prepare_data('This little place has a cute', 'interior decor', 'and affordable city prices.', tokenizer)
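
prepare_data is project-specific, but the comment block above implies it splits the sentence around the aspect term before encoding the pieces. A hypothetical splitter for the left/aspect/right decomposition (the name and behavior are assumptions, not the project's implementation):

def split_around_aspect(text, aspect):
    # partition on the first occurrence of the aspect term
    left, _, right = text.partition(aspect)
    return left.strip(), aspect, right.strip()

# split_around_aspect(
#     'This little place has a cute interior decor and affordable city prices.',
#     'interior decor')
# -> ('This little place has a cute', 'interior decor',
#     'and affordable city prices.')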
Example #30
    def __init__(self, opt):

        self.opt = opt

        if 'bert' in opt.model_name:
            tokenizer = Tokenizer4Bert(opt.max_seq_len,
                                       opt.pretrained_bert_name)
            bert = BertModel.from_pretrained(opt.pretrained_bert_name)
            self.model = opt.model_class(bert, opt).to(opt.device)
        else:
            tokenizer = build_tokenizer(
                fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
                max_seq_len=opt.max_seq_len,
                dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
            embedding_matrix = build_embedding_matrix(
                word2idx=tokenizer.word2idx,
                embed_dim=opt.embed_dim,
                dat_fname='{0}_{1}_embedding_matrix.dat'.format(
                    str(opt.embed_dim), opt.dataset))
            self.model = opt.model_class(embedding_matrix, opt).to(opt.device)

        self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
        self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(
                self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        if opt.device.type == 'cuda':
            logger.info('cuda memory allocated: {}'.format(
                torch.cuda.memory_allocated(device=opt.device.index)))
        self._print_args()

    def _print_args(self):
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape))
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        logger.info(
            'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(
                n_trainable_params, n_nontrainable_params))
        logger.info('> training arguments:')
        for arg in vars(self.opt):
            logger.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))

    def _reset_params(self):
        for child in self.model.children():
            if type(child) != BertModel:  # skip bert params
                for p in child.parameters():
                    if p.requires_grad:
                        if len(p.shape) > 1:
                            self.opt.initializer(p)
                        else:
                            stdv = 1. / math.sqrt(p.shape[0])
                            torch.nn.init.uniform_(p, a=-stdv, b=stdv)

    def _train(self, criterion, optimizer, train_data_loader,
               val_data_loader):
        max_val_acc = 0
        max_val_f1 = 0
        global_step = 0
        path = None
        for epoch in range(self.opt.num_epoch):
            logger.info('>' * 100)
            logger.info('epoch: {}'.format(epoch))
            n_correct, n_total, loss_total = 0, 0, 0
            # switch model to training mode
            self.model.train()
            for i_batch, sample_batched in enumerate(train_data_loader):
                global_step += 1
                # clear gradient accumulators
                optimizer.zero_grad()

                inputs = [
                    sample_batched[col].to(self.opt.device)
                    for col in self.opt.inputs_cols
                ]
                outputs = self.model(inputs)
                targets = sample_batched['polarity'].to(self.opt.device)

                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                n_correct += (torch.argmax(outputs,
                                           -1) == targets).sum().item()
                n_total += len(outputs)
                loss_total += loss.item() * len(outputs)
                if global_step % self.opt.log_step == 0:
                    train_acc = n_correct / n_total
                    train_loss = loss_total / n_total
                    logger.info('loss: {:.4f}, acc: {:.4f}'.format(
                        train_loss, train_acc))

            val_acc, val_f1 = self._evaluate_acc_f1(val_data_loader)
            logger.info('> val_acc: {:.4f}, val_f1: {:.4f}'.format(
                val_acc, val_f1))
            if val_acc > max_val_acc:
                max_val_acc = val_acc
                if not os.path.exists('state_dict'):
                    os.mkdir('state_dict')
                path = 'state_dict/{0}_{1}_val_acc{2}'.format(
                    self.opt.model_name, self.opt.dataset,
                    round(val_acc, 4))
                torch.save(self.model.state_dict(), path)
                logger.info('>> saved: {}'.format(path))
            if val_f1 > max_val_f1:
                max_val_f1 = val_f1

        return path

    def _evaluate_acc_f1(self, data_loader):
        n_correct, n_total = 0, 0
        t_targets_all, t_outputs_all = None, None
        # switch model to evaluation mode
        self.model.eval()
        with torch.no_grad():
            for t_batch, t_sample_batched in enumerate(data_loader):
                t_inputs = [
                    t_sample_batched[col].to(self.opt.device)
                    for col in self.opt.inputs_cols
                ]
                t_targets = t_sample_batched['polarity'].to(
                    self.opt.device)
                t_outputs = self.model(t_inputs)

                n_correct += (torch.argmax(t_outputs,
                                           -1) == t_targets).sum().item()
                n_total += len(t_outputs)

                if t_targets_all is None:
                    t_targets_all = t_targets
                    t_outputs_all = t_outputs
                else:
                    t_targets_all = torch.cat((t_targets_all, t_targets),
                                              dim=0)
                    t_outputs_all = torch.cat((t_outputs_all, t_outputs),
                                              dim=0)

        acc = n_correct / n_total
        f1 = metrics.f1_score(t_targets_all.cpu(),
                              torch.argmax(t_outputs_all, -1).cpu(),
                              labels=[0, 1, 2],
                              average='macro')
        return acc, f1

    def run(self):
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()
        _params = filter(lambda p: p.requires_grad,
                         self.model.parameters())
        optimizer = self.opt.optimizer(_params,
                                       lr=self.opt.learning_rate,
                                       weight_decay=self.opt.l2reg)

        train_data_loader = DataLoader(dataset=self.trainset,
                                       batch_size=self.opt.batch_size,
                                       shuffle=True)
        test_data_loader = DataLoader(dataset=self.testset,
                                      batch_size=self.opt.batch_size,
                                      shuffle=False)
        val_data_loader = DataLoader(dataset=self.valset,
                                     batch_size=self.opt.batch_size,
                                     shuffle=False)

        self._reset_params()
        best_model_path = self._train(criterion, optimizer,
                                      train_data_loader, val_data_loader)
        self.model.load_state_dict(torch.load(best_model_path))
        self.model.eval()
        test_acc, test_f1 = self._evaluate_acc_f1(test_data_loader)
        logger.info('>> test_acc: {:.4f}, test_f1: {:.4f}'.format(
            test_acc, test_f1))
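
In ABSA-PyTorch-style projects, these constructors and the training methods above typically live on an Instructor class; a hypothetical end-to-end driver (the class name follows the wwz58/ABSA-PyTorch layout referenced in Example #19 and is otherwise an assumption):

if __name__ == '__main__':
    # `opt` is the parsed argument namespace sketched under Example #1
    ins = Instructor(opt)  # builds the tokenizer, model, and data splits
    ins.run()              # trains, restores the best checkpoint, evaluates on test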