def run_test(args):
    """Run inference over the test split and print the raw prediction scores.

    Relies on module-level names (`config`, `logger`, `BertProcessor`,
    `EnglishPreProcessor`, `BertForMultiLable`, `SequentialSampler`,
    `DataLoader`) being in scope, like its sibling variants.
    """
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor

    # Load raw test sentences; is_train=False, so targets are placeholders.
    task_data = TaskData()
    targets, sentences = task_data.read_data(raw_data_path=config['test_path'],
                                             preprocessor=EnglishPreProcessor(),
                                             is_train=False)
    paired = list(zip(sentences, targets))

    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    id2label = {pos: name for pos, name in enumerate(label_list)}  # kept for parity with sibling variants

    # Tokenize, cache examples/features, and build the sequential dataloader.
    test_data = processor.get_test(lines=paired)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / f"cached_test_features_{args.eval_max_seq_len}_{args.arch}")
    test_dataset = processor.create_dataset(test_features)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=SequentialSampler(test_dataset),
                                 batch_size=args.train_batch_size)

    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)
    print(result)
def run_test(args):
    """Predict on the Chinese test set and write binarized labels to CSV.

    Each of the two score columns ('sg', 'pj') is thresholded at 0.5 and
    written next to the ids read from the raw test file.
    """
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor

    task_data = TaskData()
    ids, targets, sentences = task_data.read_data(raw_data_path=config['test_path'],
                                                  preprocessor=ChinesePreProcessor(),
                                                  is_train=False)
    paired = list(zip(sentences, targets))

    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    id2label = {pos: name for pos, name in enumerate(label_list)}

    test_data = processor.get_test(lines=paired)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=SequentialSampler(test_dataset),
                                 batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)

    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)

    # Build an <id, score, score> table and binarize each score at 0.5.
    id_frame = pd.DataFrame(np.array(ids), index=None)
    score_frame = pd.DataFrame(result, index=None)
    all_df = pd.concat([id_frame, score_frame], axis=1)
    all_df.columns = ['id', 'sg', 'pj']
    all_df['sg'] = all_df['sg'].apply(lambda score: 1 if score > 0.5 else 0)
    all_df['pj'] = all_df['pj'].apply(lambda score: 1 if score > 0.5 else 0)
    # NOTE(review): output path is hard-coded to a user-specific directory —
    # confirm it should not come from config.
    all_df.to_csv(
        "/home/LAB/liqian/test/game/Fin/CCKS-Cls/test_output/cls_out.csv",
        index=False)
def run_test(args):
    """Evaluate on the held-out test pickle and persist predictions.

    Dumps ground-truth labels, predicted scores, and the id->label map as
    pickles under config["test/checkpoint_dir"], then prints the metric
    results returned by the predictor.

    Fix: the original passed bare ``open(...)`` handles straight into
    ``pickle.dump``, which never closes them (leaked file descriptors,
    possibly unflushed data); context managers are used instead.
    """
    from pybert.io.task_data import TaskData  # noqa: F401  (kept for parity with sibling variants)
    from pybert.test.predictor import Predictor
    import pickle
    import os

    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    # The test split is stored in the same pickle format the train loader reads.
    test_data = processor.get_train(config['data_dir'] / f"{args.data_name}.test.pkl")
    print("Test data is:")
    print(test_data)
    print("Label list is:")
    print(label_list)
    print("----------------------------------------")

    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_cache'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_cache'] / "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
                                 batch_size=args.train_batch_size)

    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model,
                          logger=logger,
                          n_gpu=args.n_gpu,
                          batch_metrics=[AccuracyThresh(thresh=0.5)],
                          epoch_metrics=[AUC(average='micro', task_type='binary'),
                                         MultiLabelReport(id2label=id2label)])
    result, test_predicted, test_true = predictor.predict(data=test_dataloader)

    # FIX: use `with` so each pickle file is flushed and closed deterministically.
    out_dir = config["test/checkpoint_dir"]
    for filename, payload in (("test_true.p", test_true),
                              ("test_predicted.p", test_predicted),
                              ("id2label.p", id2label)):
        with open(os.path.join(out_dir, filename), "wb") as fh:
            pickle.dump(payload, fh)

    print("Predictor results:")
    print(result)
    print("-----------------------------------------------")
def run_test(args):
    """Predict multi-label outputs and write one tab-separated line per
    test row (id followed by its positive label names) to config['predictions'].

    Fixes over the original:
      * the output file was re-opened in append mode once per input row
        (O(n) opens); it is now opened once for the whole loop.
      * the builtin name ``id`` is no longer shadowed.
    """
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor

    data = TaskData()
    _, _, targets, sentences = data.read_data(config,
                                              raw_data_path=config['test_path'],
                                              is_train=False)
    lines = list(zip(sentences, targets))

    processor = BertProcessor()
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
                                 batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)

    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)

    # Binarize scores in place at the 0.5 threshold.
    result[result < 0.5] = 0
    result[result >= 0.5] = 1

    # Map each row's positive columns back to label names.
    labels = []
    for i in range(result.shape[0]):
        positive = np.where(result[i] == 1)[0]
        labels.append([id2label[idx] for idx in positive])

    if os.path.exists(config['predictions']):
        os.remove(config['predictions'])
    # FIX: open both files once; 'w' after the remove is equivalent to the
    # original remove-then-append-per-row behavior.
    with open(config['test_path'], 'r') as f, open(config['predictions'], 'w') as g:
        reader = csv.reader(f)
        for j, line in enumerate(reader):
            g.write("{}\t".format(line[0]))
            for label in labels[j]:
                g.write("{}\t".format(label))
            g.write("\n")
def run_test(args):
    """Run prediction with a (possibly fine-tuned) BERT multi-label model.

    If ``args.test_path`` is set it is treated as a checkpoint directory;
    otherwise the base model from ``config['bert_model_dir']`` is loaded.
    Optionally decodes predicted labels via ``predictor.labels``.

    Fix: the original wrote ``p.require_grad = False`` (missing the 's'),
    which only creates an unused attribute on each parameter — a silent
    no-op. The real torch flag is ``requires_grad``.
    """
    from pybert.test.predictor import Predictor

    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    test_data = processor.get_test(config['test_path'])
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
                                 batch_size=args.eval_batch_size)

    # Reverse maps for decoding token ids and label ids back to strings.
    idx2word = {i: w for (w, i) in processor.tokenizer.vocab.items()}
    label_list = processor.get_labels(label_path=config['data_label_path'])
    idx2label = {i: label for i, label in enumerate(label_list)}

    if args.test_path:
        args.test_path = Path(args.test_path)
        model = BertForMultiLable.from_pretrained(args.test_path,
                                                  num_labels=len(label_list))
    else:
        model = BertForMultiLable.from_pretrained(config['bert_model_dir'],
                                                  num_labels=len(label_list))

    # Freeze the encoder: inference only, no gradients needed.
    for p in model.bert.parameters():
        p.requires_grad = False  # FIX: was `require_grad`, a silent no-op

    # ----------- predicting -----------
    writer = SummaryWriter()  # NOTE(review): unused beyond creating a log dir — confirm intent
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu,
                          i2w=idx2word, i2l=idx2label)
    result = predictor.predict(data=test_dataloader)
    if args.predict_labels:
        predictor.labels(result, args.predict_idx)
def run_test(args):
    """Run multi-class prediction on the test set and write submit1.csv
    with one ``id,predicted_class`` row per sample; prints the argmaxed
    prediction array at the end.
    """
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor

    task_data = TaskData()
    ids, targets, sentences = task_data.read_data(raw_data_path=config['test_path'],
                                                  preprocessor=None,
                                                  is_train=False)
    paired = list(zip(sentences, targets))

    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    id2label = {pos: name for pos, name in enumerate(label_list)}

    test_data = processor.get_test(lines=paired)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=SequentialSampler(test_dataset),
                                 batch_size=args.train_batch_size)

    model = BertForMultiClass.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)

    import numpy as np
    # Multi-class: keep only the argmax column index per row.
    result = np.argmax(result, axis=1)
    with open('submit1.csv', 'w', encoding='utf-8') as f:
        for sample_id, pred in zip(ids, result):
            f.write(sample_id + ',' + str(pred) + '\n')
    print(result)
def run_test(args, test=False, k=7, med_map='pybert/dataset/med_map.csv'):
    """Predict on the test set; in eval mode compute metrics, otherwise
    return the top-k medication names per sample.

    Args:
        args: parsed CLI namespace (arch, batch sizes, n_gpu, ...).
        test: when True, read_data keeps real targets and metrics
            (Recall, Acc) are computed and returned as a dict.
        k: number of top-scoring labels to keep in prediction mode.
        med_map: path to a CSV mapping label index -> medication name.

    Returns:
        dict of metric values when ``test`` is True; otherwise an array of
        top-k medication names per row.

    Fix: ``med_map`` was accepted but ignored — the same path was
    hard-coded again at the ``open()`` call. The parameter (whose default
    equals the old hard-coded path, so behavior is unchanged for existing
    callers) is now actually used.
    """
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor

    data = TaskData()
    targets, sentences = data.read_data(raw_data_path=config['test_path'],
                                        preprocessor=EnglishPreProcessor(),
                                        is_train=test)
    print(
        f'-----------------------------------------\ntargets {targets}\n---------------------------------------------------'
    )
    lines = list(zip(sentences, targets))
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
                                 batch_size=args.train_batch_size)

    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'])

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu, test=test)

    if test:
        # Evaluation mode: score predictions against the known targets.
        results, targets = predictor.predict(data=test_dataloader)
        result = dict()
        metrics = [Recall(), Acc()]
        for metric in metrics:
            metric.reset()
            metric(logits=results, target=targets)
            value = metric.value()
            if value is not None:
                result[f'valid_{metric.name()}'] = value
        return result

    # Prediction mode: take the k highest-scoring label indices per row
    # (argsort ascending, slice the tail, reverse for descending order)...
    results = predictor.predict(data=test_dataloader)
    pred = np.argsort(results)[:, -k:][:, ::-1]
    # ...then map indices to medication names via the mapping CSV.
    with open(med_map, mode='r') as infile:  # FIX: honor the med_map argument
        reader = csv.reader(infile)
        med_dict = {int(rows[0]): rows[1] for rows in reader}
    pred = np.vectorize(med_dict.get)(pred)
    return pred
def run_test(args):
    """Evaluate the model on the test file (micro F1 with threshold search)
    and write both the metric value and a binarized per-label CSV.

    NOTE(review): like its sibling variants, this reads module-level
    globals (`config`, `logger`, `collate_fn`, model classes, ...).
    """
    # TODO: evaluate results on the training set using micro F1-score
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor
    data = TaskData()
    # is_train=True on purpose: the "test" file here carries real targets,
    # which are needed below to compute the F1 score.
    ids, targets, sentences = data.read_data(
        raw_data_path=config['test_path'],
        preprocessor=ChinesePreProcessor(),
        is_train=True)  # deliberately set to True
    lines = list(zip(sentences, targets))
    #print(ids,sentences)
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    # Label set depends on the task type ('base' vs. few-shot variants).
    label_list = processor.get_labels(args.task_type)
    id2label = {i: label for i, label in enumerate(label_list)}
    test_data = processor.get_test(lines=lines)
    # Examples/features are cached per task type, seq len and architecture.
    test_examples = processor.create_examples(
        lines=test_data,
        example_type=f'test_{args.task_type}',
        cached_examples_file=config['data_dir'] /
        f"cached_test_{args.task_type}_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_test_{}_features_{}_{}".format(
            args.task_type, args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)
    model = None
    if args.task_type == 'base':
        model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                                  num_labels=len(label_list))
    else:
        # model = BertForMultiLable.from_pretrained(config['checkpoint_dir'], num_labels=len(label_list))
        model = BertForMultiLable_Fewshot.from_pretrained(
            config['checkpoint_dir'], num_labels=len(label_list))
    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)  # `all_logits` might be a better name
    # TODO: compute the F1-score; this part still needs to be tested with code
    # search_thresh=True makes the metric search for the best threshold.
    f1_metric = F1Score(task_type='binary', average='micro', search_thresh=True)
    all_logits = torch.tensor(result, dtype=torch.float)  # convert to tensor
    all_labels = torch.tensor(targets, dtype=torch.long)  # convert to tensor
    f1_metric(all_logits, all_labels)  # prints its result automatically
    print(f1_metric.value())
    # Append the metric value to a log file.
    with open('test_output/test.log', 'a+') as f:
        f.write(str(f1_metric.value()) + "\n")
    # Reuse the threshold found by the search to binarize the CSV output.
    thresh = f1_metric.thresh
    ids = np.array(ids)
    df1 = pd.DataFrame(ids, index=None)
    df2 = pd.DataFrame(result, index=None)
    all_df = pd.concat([df1, df2], axis=1)
    if args.task_type == 'base':
        all_df.columns = ['id', 'zy', 'gfgqzr', 'qs', 'tz', 'ggjc']
    else:
        all_df.columns = ['id', 'sg', 'pj', 'zb', 'qsht', 'db']
    for column in all_df.columns[1:]:
        all_df[column] = all_df[column].apply(lambda x: 1 if x > thresh else 0)
    # all_df['zy'] = all_df['zy'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['gfgqzr'] = all_df['gfgqzr'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['qs'] = all_df['qs'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['tz'] = all_df['tz'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['ggjc'] = all_df['ggjc'].apply(lambda x: 1 if x>thresh else 0)
    all_df.to_csv(f"test_output/{args.task_type}/cls_out.csv", index=False)