def run_test(args):
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor
    data = TaskData()
    ids, targets, sentences = data.read_data(
        raw_data_path=config['test_path'],
        preprocessor=ChinesePreProcessor(),
        is_train=False)
    lines = list(zip(sentences, targets))
    #print(ids,sentences)
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] /
        f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)
    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)
    ids = np.array(ids)
    df1 = pd.DataFrame(ids, index=None)
    df2 = pd.DataFrame(result, index=None)
    all_df = pd.concat([df1, df2], axis=1)

    all_df.columns = ['id', 'sg', 'pj']
    all_df['sg'] = all_df['sg'].apply(lambda x: 1 if x > 0.5 else 0)
    all_df['pj'] = all_df['pj'].apply(lambda x: 1 if x > 0.5 else 0)
    #all_df['qs'] = all_df['qs'].apply(lambda x: 1 if x>0.5 else 0)
    #all_df['tz'] = all_df['tz'].apply(lambda x: 1 if x>0.5 else 0)
    #all_df['ggjc'] = all_df['ggjc'].apply(lambda x: 1 if x>0.5 else 0)

    #all_df.columns = ['id','zy','gfgqzr','qs','tz','ggjc']
    #all_df['zy'] = all_df['zy'].apply(lambda x: 1 if x>0.5 else 0)
    #all_df['gfgqzr'] = all_df['gfgqzr'].apply(lambda x: 1 if x>0.5 else 0)
    #all_df['qs'] = all_df['qs'].apply(lambda x: 1 if x>0.5 else 0)
    #all_df['tz'] = all_df['tz'].apply(lambda x: 1 if x>0.5 else 0)
    #all_df['ggjc'] = all_df['ggjc'].apply(lambda x: 1 if x>0.5 else 0)
    all_df.to_csv(
        "/home/LAB/liqian/test/game/Fin/CCKS-Cls/test_output/cls_out.csv",
        index=False)
Example #2
def run_test(args):
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor
    data = TaskData()
    # targets, sentences = data.read_data(raw_data_path=config['test_path'],
    #                                     preprocessor=EnglishPreProcessor(),
    #                                     is_train=False)
    _, _, targets, sentences = data.read_data(config, raw_data_path=config['test_path'],
                                              is_train=False)
    lines = list(zip(sentences, targets))
    # processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    processor = BertProcessor()
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(
            args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)
    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'], num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model,
                          logger=logger,
                          n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)
    result[result<0.5] = 0
    result[result>=0.5] = 1
    labels = []
    for i in range(result.shape[0]):
        ids = np.where(result[i]==1)[0]
        each_patent_label = [id2label[id] for id in ids]
        labels.append(each_patent_label)
    if os.path.exists(config['predictions']):
        os.remove(config['predictions'])
    with open(config['test_path'], 'r') as f, \
            open(config['predictions'], 'a+') as g:
        reader = csv.reader(f)
        for j, line in enumerate(reader):
            id = line[0]
            g.write("{}\t".format(id))
            for label in labels[j]:
                g.write("{}\t".format(label))
            g.write("\n")
Example #3
def main(text, arch, max_seq_length, do_lower_case):
    processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=do_lower_case)
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}
    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'] / f'{arch}', num_labels=len(label_list))
    tokens = processor.tokenizer.tokenize(text)
    if len(tokens) > max_seq_length - 2:
        tokens = tokens[:max_seq_length - 2]
    tokens = ['[CLS]'] + tokens + ['[SEP]']
    input_ids = processor.tokenizer.convert_tokens_to_ids(tokens)
    input_ids = torch.tensor(input_ids).unsqueeze(0)  # batch size 1
    logits = model(input_ids)
    probs = logits.sigmoid()
    return probs.cpu().detach().numpy()[0]
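A possible invocation of main above (the arch value is hypothetical and must match a subdirectory of checkpoint_dir):

# hypothetical usage; returns one sigmoid probability per label
probs = main("The quick brown fox jumps over the lazy dog.",
             arch='bert', max_seq_length=128, do_lower_case=True)
print(probs)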
def run_test(args):
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor
    data = TaskData()
    targets, sentences = data.read_data(raw_data_path=config['test_path'],
                                        preprocessor=EnglishPreProcessor(),
                                        is_train=False)
    lines = list(zip(sentences, targets))
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type='test',
        cached_examples_file=config['data_dir'] /
        f"cached_test_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_test_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)
    model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                              num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)
    print(result)
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}
    train_data = processor.get_train(config['data_dir'] /
                                     f"{args.data_name}.train.pkl")
    train_examples = processor.create_examples(
        lines=train_data,
        example_type='train',
        cached_examples_file=config['data_dir'] /
        f"cached_train_examples_{args.arch}")
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_train_features_{}_{}".format(args.train_max_seq_len,
                                             args.arch))
    train_dataset = processor.create_dataset(train_features,
                                             is_sorted=args.sorted)
    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size,
                                  collate_fn=collate_fn)

    valid_data = processor.get_dev(config['data_dir'] /
                                   f"{args.data_name}.valid.pkl")
    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type='valid',
        cached_examples_file=config['data_dir'] /
        f"cached_valid_examples_{args.arch}")

    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_valid_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  batch_size=args.eval_batch_size,
                                  collate_fn=collate_fn)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path,
                                                  num_labels=len(label_list))
    else:
        model = BertForMultiLable.from_pretrained(config['bert_model_dir'],
                                                  num_labels=len(label_list))
    t_total = int(
        len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        args.weight_decay
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    warmup_steps = int(t_total * args.warmup_proportion)

    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.fp16_opt_level)
    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'],
                                    arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=args.mode,
                                       monitor=args.monitor,
                                       arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args=args,
                      model=model,
                      logger=logger,
                      criterion=BCEWithLogLoss(),
                      optimizer=optimizer,
                      scheduler=scheduler,
                      early_stopping=None,
                      training_monitor=train_monitor,
                      model_checkpoint=model_checkpoint,
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      epoch_metrics=[
                          AUC(average='micro', task_type='binary'),
                          MultiLabelReport(id2label=id2label),
                          F1Score(task_type='binary',
                                  average='micro',
                                  search_thresh=True)
                      ])
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader)
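For reference, get_linear_schedule_with_warmup scales the base learning rate by a multiplier that ramps linearly from 0 to 1 over warmup_steps, then decays linearly to 0 at t_total. A sketch of that multiplier (matching the scheduler's documented behavior, not the library source verbatim):

def linear_warmup_multiplier(step, warmup_steps, t_total):
    # 0 -> 1 over the warmup phase, then 1 -> 0 by the final step
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    return max(0.0, (t_total - step) / max(1, t_total - warmup_steps))

# e.g. warmup_proportion=0.1 and t_total=1000 give warmup_steps=100:
# multiplier is 0.5 at step 50, 1.0 at step 100, 0.5 at step 550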
Example #6
def run_train(args, data_names):
    # --------- data
    # processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    processor = BertProcessor()
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    # train_data = processor.get_train(config['data_dir'] / f"{data_name}.train.pkl")
    # train_examples = processor.create_examples(lines=train_data,
    #                                            example_type='train',
    #                                            cached_examples_file=config[
    #                                                 'data_dir'] / f"cached_train_examples_{args.arch}")
    # train_features = processor.create_features(examples=train_examples,
    #                                            max_seq_len=args.train_max_seq_len,
    #                                            cached_features_file=config[
    #                                                 'data_dir'] / "cached_train_features_{}_{}".format(
    #                                                args.train_max_seq_len, args.arch
    #                                            ))
    # train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    # if args.sorted:
    #     train_sampler = SequentialSampler(train_dataset)
    # else:
    #     train_sampler = RandomSampler(train_dataset)
    # train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,
    #                               collate_fn=collate_fn)
    #
    # valid_data = processor.get_dev(config['data_dir'] / f"{data_name}.valid.pkl")
    # valid_examples = processor.create_examples(lines=valid_data,
    #                                            example_type='valid',
    #                                            cached_examples_file=config[
    #                                             'data_dir'] / f"cached_valid_examples_{args.arch}")
    #
    # valid_features = processor.create_features(examples=valid_examples,
    #                                            max_seq_len=args.eval_max_seq_len,
    #                                            cached_features_file=config[
    #                                             'data_dir'] / "cached_valid_features_{}_{}".format(
    #                                                args.eval_max_seq_len, args.arch
    #                                            ))
    # valid_dataset = processor.create_dataset(valid_features)
    # valid_sampler = SequentialSampler(valid_dataset)
    # valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size,
    #                               collate_fn=collate_fn)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        # model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
        model = BertForMultiLable.from_pretrained("bert-base-multilingual-cased", num_labels=len(label_list))
    #t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)
    t_total = 200000
  
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
         {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],'weight_decay': args.weight_decay},
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],mode=args.mode,
                                       monitor=args.monitor,arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    #logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args= args,model=model,logger=logger,criterion=BCEWithLogLoss(),optimizer=optimizer,
                      scheduler=scheduler,early_stopping=None,training_monitor=train_monitor,
                      model_checkpoint=model_checkpoint,
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      epoch_metrics=[AUC(average='micro', task_type='binary'),
                                     MultiLabelReport(id2label=id2label),
                                     F1Score(average='micro', task_type='binary')])

    trainer.model.zero_grad()
    seed_everything(trainer.args.seed)  # Added here for reproducibility (even between python 2 and 3)
    
    iter_num = 0
    valid_dataloader = get_valid_dataloader(args)
    for epoch in range(trainer.start_epoch, trainer.start_epoch + trainer.args.epochs):
        trainer.logger.info(f"Epoch {epoch}/{trainer.args.epochs}")
        update_epoch = True

        for i, data_name in enumerate(data_names):
            filename_int = int(data_name)
            if filename_int > 3500:
                continue
            trainer.logger.info(f"Epoch {epoch} - summary {i+1}/{len(data_names)}"+ f": summary_{data_name}")
            # train_dataloader, valid_dataloader = get_dataloader(args, data_name)
            train_dataloader = get_dataloader(args, data_name)
            # train_log, valid_log = trainer.train(train_data=train_dataloader, valid_data=valid_dataloader, epoch=update_epoch)
            train_log = trainer.train(train_data=train_dataloader, epoch=update_epoch)
            update_epoch = False

            # if train_log == None:
            #     continue
            
            iter_num += 1

            # logs = dict(train_log)
            # show_info = f'\nEpoch: {epoch} - ' + "-".join([f' {key}: {value:.4f} ' for key, value in logs.items()])
            # trainer.logger.info(show_info)


            if iter_num % 50 == 0:
                valid_log = trainer.valid_epoch(valid_dataloader)
                logs = dict(valid_log)
                show_info = f'\nEpoch: {epoch} - ' + "-".join([f' {key}: {value:.4f} ' for key, value in logs.items()])
                trainer.logger.info(show_info)

                # save
                if trainer.training_monitor:
                    trainer.training_monitor.epoch_step(logs)

            # save model
            if trainer.model_checkpoint:
                if iter_num % 50 == 0:
                #     state = trainer.save_info(epoch, best=logs[trainer.model_checkpoint.monitor])
                    state = trainer.save_info(iter_num, best=logs[trainer.model_checkpoint.monitor])
                    trainer.model_checkpoint.bert_epoch_step(current=logs[trainer.model_checkpoint.monitor], state=state)

            # early_stopping (note: `logs` is only populated on the
            # iter_num % 50 validation iterations above, so early stopping
            # should only be enabled together with that periodic validation)
            if trainer.early_stopping:
                trainer.early_stopping.epoch_step(epoch=epoch, current=logs[trainer.early_stopping.monitor])
                if trainer.early_stopping.stop_training:
                    break
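get_dataloader and get_valid_dataloader are helpers not shown in this example. A plausible sketch of the per-shard training loader, reconstructed from the commented-out pipeline at the top of run_train (the per-shard cache-file naming is an assumption):

def get_dataloader(args, data_name):
    # build a DataLoader for one training shard, mirroring the
    # commented-out BertProcessor pipeline above
    processor = BertProcessor()
    train_data = processor.get_train(config['data_dir'] / f"{data_name}.train.pkl")
    train_examples = processor.create_examples(
        lines=train_data,
        example_type='train',
        cached_examples_file=config['data_dir'] / f"cached_train_examples_{args.arch}_{data_name}")
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] / f"cached_train_features_{args.arch}_{data_name}")
    train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    sampler = SequentialSampler(train_dataset) if args.sorted else RandomSampler(train_dataset)
    return DataLoader(train_dataset, sampler=sampler,
                      batch_size=args.train_batch_size, collate_fn=collate_fn)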
def run_test(args):
    # TODO: evaluate the results on the training set with micro F1-score
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor
    data = TaskData()
    ids, targets, sentences = data.read_data(
        raw_data_path=config['test_path'],
        preprocessor=ChinesePreProcessor(),
        is_train=True)  # set to True so that targets are returned for scoring
    lines = list(zip(sentences, targets))
    #print(ids,sentences)
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels(args.task_type)
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type=f'test_{args.task_type}',
        cached_examples_file=config['data_dir'] /
        f"cached_test_{args.task_type}_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_test_{}_features_{}_{}".format(
            args.task_type, args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 batch_size=args.train_batch_size,
                                 collate_fn=collate_fn)
    model = None
    if args.task_type == 'base':
        model = BertForMultiLable.from_pretrained(config['checkpoint_dir'],
                                                  num_labels=len(label_list))
    else:
        # model = BertForMultiLable.from_pretrained(config['checkpoint_dir'], num_labels=len(label_list))
        model = BertForMultiLable_Fewshot.from_pretrained(
            config['checkpoint_dir'], num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)  # arguably better named all_logits
    # TODO: compute the F1-score; this module still needs to be tested
    f1_metric = F1Score(task_type='binary',
                        average='micro',
                        search_thresh=True)
    all_logits = torch.tensor(result, dtype=torch.float)  # convert to tensor
    all_labels = torch.tensor(targets, dtype=torch.long)  # convert to tensor
    f1_metric(all_logits, all_labels)  # prints the result automatically
    print(f1_metric.value())
    # write the result to a file
    with open('test_output/test.log', 'a+') as f:
        f.write(str(f1_metric.value()) + "\n")
    thresh = f1_metric.thresh

    ids = np.array(ids)
    df1 = pd.DataFrame(ids, index=None)
    df2 = pd.DataFrame(result, index=None)
    all_df = pd.concat([df1, df2], axis=1)
    if args.task_type == 'base':
        all_df.columns = ['id', 'zy', 'gfgqzr', 'qs', 'tz', 'ggjc']
    else:
        all_df.columns = ['id', 'sg', 'pj', 'zb', 'qsht', 'db']
    for column in all_df.columns[1:]:
        all_df[column] = all_df[column].apply(lambda x: 1 if x > thresh else 0)
    # all_df['zy'] = all_df['zy'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['gfgqzr'] = all_df['gfgqzr'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['qs'] = all_df['qs'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['tz'] = all_df['tz'].apply(lambda x: 1 if x>thresh else 0)
    # all_df['ggjc'] = all_df['ggjc'].apply(lambda x: 1 if x>thresh else 0)
    all_df.to_csv(f"test_output/{args.task_type}/cls_out.csv", index=False)