Example #1
File: train.py Project: yyq90/tclm
def eval(opt):
    model_path = opt.model_path
    processors = {
        'oce': (OceProcessor, 'OCEMOTION'),
        'tnews': (TnewsProcessor, 'TNEWS'),
        'ocnli': (OcnliProcessor, 'OCNLI')
    }
    model = build_model(opt.bert_dir)
    for task in processors.keys():
        logger.info(f'############## Task : {task} eval ##############')
        processor = processors[task][0]()
        file_dev = processors[task][1] + '_dev_s.csv'
        dev_raw_examples = processor.load_raw(
            os.path.join(opt.raw_data_dir, file_dev))
        dev_examples = processor.get_dev_examples(dev_raw_examples)
        dev_features = convert_examples_to_features(task, dev_examples,
                                                    opt.bert_dir,
                                                    opt.max_seq_len)
        dev_dataset = build_dataset(task, dev_features, 'dev')

        dev_loader = DataLoader(dev_dataset,
                                batch_size=opt.eval_batch_size,
                                shuffle=False,
                                num_workers=0)

        model, device = load_model_and_parallel(model,
                                                opt.gpu_ids[0],
                                                ckpt_path=model_path)
        pred_logits = None
        labels = None
        for tmp_pred in get_base_out(task, model, dev_loader, device):
            pred = tmp_pred[0].cpu().numpy()
            label = tmp_pred[1].cpu().numpy()

            if pred_logits is None:
                pred_logits = pred
            else:
                pred_logits = np.append(pred_logits, pred, axis=0)

            if labels is None:
                labels = label
            else:
                labels = np.append(labels, label, axis=0)

        preds = np.argmax(pred_logits, -1)
        macro_f1_score = f1_score(labels, preds, average='macro')
        logger.info(f'\nmacro f1 at {model_path} is {macro_f1_score}')
        logger.info('\nconfusion_matrix:')
        cm_res = confusion_matrix(labels, preds)
        logger.info(f'{cm_res}')
        logger.info(f'{classification_report(labels, preds)}')
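
A note on the accumulation pattern above: np.append copies the whole array on every batch, so for long dev sets it is cheaper to collect per-batch outputs in Python lists and concatenate once. A minimal sketch of the same loop with that change (get_base_out, task, model, dev_loader, and device as in eval above):

import numpy as np

# Gather per-batch logits and labels, then concatenate once at the end.
pred_chunks, label_chunks = [], []
for tmp_pred in get_base_out(task, model, dev_loader, device):
    pred_chunks.append(tmp_pred[0].cpu().numpy())
    label_chunks.append(tmp_pred[1].cpu().numpy())

pred_logits = np.concatenate(pred_chunks, axis=0)
labels = np.concatenate(label_chunks, axis=0)
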
def train_base(opt, info_dict, train_examples, dev_info=None):
    feature_para, dataset_para, model_para = prepare_para_dict(opt, info_dict)

    train_features = convert_examples_to_features(opt.task_type,
                                                  train_examples, opt.bert_dir,
                                                  opt.max_seq_len,
                                                  **feature_para)

    logger.info(f'Build {len(train_features)} train features')

    train_dataset = build_dataset(opt.task_type, train_features, 'train',
                                  **dataset_para)

    model = build_model(opt.task_type, opt.bert_dir, **model_para)

    train(opt, model, train_dataset)

    if dev_info is not None:
        dev_examples, dev_callback_info = dev_info

        dev_features = convert_examples_to_features(opt.task_type,
                                                    dev_examples, opt.bert_dir,
                                                    opt.max_seq_len,
                                                    **feature_para)

        logger.info(f'Build {len(dev_features)} dev features')

        dev_dataset = build_dataset(opt.task_type, dev_features, 'dev',
                                    **dataset_para)

        dev_loader = DataLoader(dev_dataset,
                                batch_size=opt.eval_batch_size,
                                shuffle=False,
                                num_workers=8)

        dev_info = (dev_loader, dev_callback_info)

        model_path_list = get_model_path_list(opt.output_dir)

        metric_str = ''

        max_f1 = 0.
        max_f1_step = 0

        for idx, model_path in enumerate(model_path_list):

            tmp_step = model_path.split('/')[-2].split('-')[-1]

            model, device = load_model_and_parallel(model,
                                                    opt.gpu_ids[0],
                                                    ckpt_path=model_path)

            if opt.task_type == 'trigger':

                tmp_metric_str, tmp_f1 = trigger_evaluation(
                    model,
                    dev_info,
                    device,
                    start_threshold=opt.start_threshold,
                    end_threshold=opt.end_threshold)

            elif opt.task_type == 'role1':
                tmp_metric_str, tmp_f1 = role1_evaluation(
                    model,
                    dev_info,
                    device,
                    start_threshold=opt.start_threshold,
                    end_threshold=opt.end_threshold)
            elif opt.task_type == 'role2':
                tmp_metric_str, tmp_f1 = role2_evaluation(
                    model, dev_info, device)
            else:
                tmp_metric_str, tmp_f1 = attribution_evaluation(
                    model,
                    dev_info,
                    device,
                    polarity2id=info_dict['polarity2id'],
                    tense2id=info_dict['tense2id'])

            logger.info(f'In step {tmp_step}: {tmp_metric_str}')

            metric_str += f'In step {tmp_step}: {tmp_metric_str}' + '\n\n'

            if tmp_f1 > max_f1:
                max_f1 = tmp_f1
                max_f1_step = tmp_step

        max_metric_str = f'Max f1 is: {max_f1}, in step {max_f1_step}'

        logger.info(max_metric_str)

        metric_str += max_metric_str + '\n'

        eval_save_path = os.path.join(opt.output_dir, 'eval_metric.txt')

        with open(eval_save_path, 'a', encoding='utf-8') as f:
            f.write(metric_str)
Example #3
def train(task, opt, model, train_dataset):

    train_sampler = RandomSampler(train_dataset)

    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=opt.train_batch_size,
        sampler=train_sampler,
        # num_workers=8,
    )

    model, device = load_model_and_parallel(model, opt.gpu_ids, opt.ckpt_path)

    use_n_gpus = False
    if hasattr(model, "module"):
        use_n_gpus = True

    t_total = len(train_loader) * opt.train_epochs

    optimizer, scheduler = build_optimizer_and_scheduler(opt, model, t_total)

    # Train
    logger.info("***** Running training *****")
    logger.info(f"  Num Examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {opt.train_epochs}")
    logger.info(f"  Total training batch size = {opt.train_batch_size}")
    logger.info(f"  Total optimization steps = {t_total}")

    global_step = 0

    model.zero_grad()

    fgm, pgd = None, None

    attack_train_mode = opt.attack_train.lower()
    if attack_train_mode == 'fgm':
        fgm = FGM(model=model)
    elif attack_train_mode == 'pgd':
        pgd = PGD(model=model)
    pgd_k = 3

    save_steps = t_total // opt.train_epochs
    eval_steps = save_steps

    logger.info(
        f'Save model in {save_steps} steps; Eval model in {eval_steps} steps')
    log_loss_steps = 20
    avg_loss = 0.
    for epoch in range(opt.train_epochs):
        for step, batch_data in enumerate(train_loader):
            model.train()
            for key in batch_data.keys():
                if key == 'task':
                    continue
                batch_data[key] = batch_data[key].to(device)

            if task == 'total' or not task:
                loss = model(**batch_data)[0]
            else:
                loss = model(task=task, **batch_data)[0]

            if use_n_gpus:
                loss = loss.mean()
            loss.backward()

            if fgm is not None:
                fgm.attack()

                if task == 'total' or not task:
                    loss_adv = model(**batch_data)[0]
                else:
                    loss_adv = model(task=task, **batch_data)[0]

                if use_n_gpus:
                    loss_adv = loss_adv.mean()
                loss_adv.backward()
                fgm.restore()

            elif pgd is not None:
                pgd.backup_grad()
                for _t in range(pgd_k):
                    pgd.attack(is_first_attack=(_t == 0))
                    if _t != pgd_k - 1:
                        model.zero_grad()
                    else:
                        pgd.restore_grad()

                    if task == 'total' or not task:
                        loss_adv = model(**batch_data)[0]
                    else:
                        loss_adv = model(task=task, **batch_data)[0]

                    if use_n_gpus:
                        loss_adv = loss_adv.mean()
                    loss_adv.backward()

                pgd.restore()
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           opt.max_grad_norm)

            optimizer.step()
            scheduler.step()
            model.zero_grad()

            global_step += 1

            # accumulate this step's loss before checking the logging window,
            # so the logged average covers all log_loss_steps batches
            avg_loss += loss.item()
            if global_step % log_loss_steps == 0:
                avg_loss /= log_loss_steps
                logger.info('Step: %d / %d ----> total loss: %.5f' %
                            (global_step, t_total, avg_loss))
                avg_loss = 0.
            if global_step % save_steps == 0:
                save_model(opt, model, global_step)

    # clear cuda cache to avoid OOM
    torch.cuda.empty_cache()

    logger.info('Train done')
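
The FGM and PGD objects used in the adversarial branches above are imported from elsewhere in the project. For reference, here is a minimal sketch of the interface train() relies on, following the widely used embedding-perturbation formulation; the eps/alpha defaults and the 'word_embeddings' parameter name are assumptions, not the project's actual values:

import torch

class FGM:
    """Fast Gradient Method: one-step perturbation of the embedding weights."""
    def __init__(self, model, eps=1.0, emb_name='word_embeddings'):
        self.model = model
        self.eps = eps
        self.emb_name = emb_name  # assumed substring of the embedding parameter name
        self.backup = {}

    def attack(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name and param.grad is not None:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.eps * param.grad / norm)

    def restore(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name and name in self.backup:
                param.data = self.backup[name]
        self.backup = {}

class PGD:
    """Projected Gradient Descent: k small steps, projected back onto an eps-ball."""
    def __init__(self, model, eps=1.0, alpha=0.3, emb_name='word_embeddings'):
        self.model = model
        self.eps = eps
        self.alpha = alpha
        self.emb_name = emb_name
        self.emb_backup = {}
        self.grad_backup = {}

    def attack(self, is_first_attack=False):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name and param.grad is not None:
                if is_first_attack:
                    self.emb_backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.alpha * param.grad / norm)
                    param.data = self.project(name, param.data)

    def project(self, name, data):
        # clip the accumulated perturbation back onto the eps-ball around the original weights
        r = data - self.emb_backup[name]
        if torch.norm(r) > self.eps:
            r = self.eps * r / torch.norm(r)
        return self.emb_backup[name] + r

    def restore(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name and name in self.emb_backup:
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def backup_grad(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.grad_backup:
                param.grad = self.grad_backup[name]
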
Example #4
def pipeline_predict(opt):
    """
    pipeline predict the submit results
    """
    if not os.path.exists(opt.submit_dir):
        os.mkdir(opt.submit_dir)

    submit = []

    with open(os.path.join(opt.raw_data_dir, 'test.json'),
              encoding='utf-8') as f:
        text_examples = json.load(f)

    tokenizer = BertTokenizer.from_pretrained(opt.bert_dir)

    trigger_model = TriggerExtractor(
        bert_dir=opt.bert_dir, use_distant_trigger=opt.use_distant_trigger)

    role1_model = Role1Extractor(
        bert_dir=opt.bert_dir,
        use_trigger_distance=opt.role1_use_trigger_distance)

    role2_model = Role2Extractor(
        bert_dir=opt.bert_dir,
        use_trigger_distance=opt.role2_use_trigger_distance)

    attribution_model = AttributionClassifier(bert_dir=opt.bert_dir)

    logger.info('Load models')
    trigger_model, device = load_model_and_parallel(trigger_model,
                                                    opt.gpu_ids[0],
                                                    ckpt_path=os.path.join(
                                                        opt.trigger_ckpt_dir,
                                                        'model.pt'))

    role1_model, _ = load_model_and_parallel(role1_model,
                                             opt.gpu_ids[0],
                                             ckpt_path=os.path.join(
                                                 opt.role1_ckpt_dir,
                                                 'model.pt'))

    role2_model, _ = load_model_and_parallel(role2_model,
                                             opt.gpu_ids[0],
                                             ckpt_path=os.path.join(
                                                 opt.role2_ckpt_dir,
                                                 'model.pt'))

    attribution_model, _ = load_model_and_parallel(
        attribution_model,
        opt.gpu_ids[0],
        ckpt_path=os.path.join(opt.attribution_ckpt_dir, 'model.pt'))

    id2role = {v: k for k, v in ROLE2_TO_ID.items()}

    start_threshold = opt.role1_start_threshold
    end_threshold = opt.role1_end_threshold

    with open(os.path.join(opt.mid_data_dir, 'polarity2id.json'),
              encoding='utf-8') as f:
        polarity2id = json.load(f)
    with open(os.path.join(opt.mid_data_dir, 'tense2id.json'),
              encoding='utf-8') as f:
        tense2id = json.load(f)

    polarity2id = polarity2id['map']
    tense2id = tense2id['map']

    id2polarity = {v: k for k, v in polarity2id.items()}
    id2tense = {v: k for k, v in tense2id.items()}

    counts = 0
    with torch.no_grad():
        trigger_model.eval()
        role1_model.eval()
        role2_model.eval()
        attribution_model.eval()

        for _ex in tqdm(text_examples, desc='decode test examples'):
            distant_triggers = _ex['distant_triggers']

            tmp_instance = {'sentence': _ex['sentence'], 'words': _ex['words']}

            tmp_text = _ex['sentence']
            tmp_text_tokens = fine_grade_tokenize(tmp_text, tokenizer)

            assert len(tmp_text) == len(tmp_text_tokens)

            trigger_encode_dict = tokenizer.encode_plus(
                text=tmp_text_tokens,
                max_length=512,
                pad_to_max_length=False,
                is_pretokenized=True,
                return_token_type_ids=True,
                return_attention_mask=True,
                return_tensors='pt')

            tmp_base_inputs = {
                'token_ids': trigger_encode_dict['input_ids'],
                'attention_masks': trigger_encode_dict['attention_mask'],
                'token_type_ids': trigger_encode_dict['token_type_ids']
            }

            trigger_inputs = copy.deepcopy(tmp_base_inputs)

            # build the distant-trigger feature for the test example
            if opt.use_distant_trigger:
                distant_trigger_label = [0] * len(tmp_text)
                for _trigger in distant_triggers:
                    tmp_trigger_tokens = fine_grade_tokenize(
                        _trigger, tokenizer)
                    tmp_index_list = search_label_index(
                        tmp_text_tokens, tmp_trigger_tokens)

                    assert len(tmp_index_list)

                    for _index in tmp_index_list:
                        for i in range(_index[0], _index[1] + 1):
                            distant_trigger_label[i] = 1

                if len(distant_trigger_label) > 510:
                    distant_trigger_label = distant_trigger_label[:510]
                distant_trigger_label = [0] + distant_trigger_label + [0]

                distant_trigger_label = torch.tensor(
                    [distant_trigger_label]).long()
                trigger_inputs['distant_trigger'] = distant_trigger_label

            for key in trigger_inputs.keys():
                trigger_inputs[key] = trigger_inputs[key].to(device)

            tmp_trigger_pred = trigger_model(**trigger_inputs)[0][0]

            tmp_trigger_pred = tmp_trigger_pred.cpu().numpy()[1:1 +
                                                              len(tmp_text)]

            tmp_triggers = pointer_trigger_decode(
                tmp_trigger_pred,
                tmp_text,
                distant_triggers,
                start_threshold=opt.trigger_start_threshold,
                end_threshold=opt.trigger_end_threshold,
                one_trigger=True)

            if not len(tmp_triggers):
                # count sentences where no trigger was decoded (logged at the end)
                counts += 1
                print(_ex['sentence'])

            events = []

            for _trigger in tmp_triggers:
                tmp_event = {
                    'trigger': {
                        'text': _trigger[0],
                        'length': len(_trigger[0]),
                        'offset': int(_trigger[1])
                    },
                    'arguments': []
                }

                if len(_trigger) > 2:
                    print(_trigger)

                role_inputs = copy.deepcopy(tmp_base_inputs)

                # TODO: the start/end indexing here differs from the newer model version; this version is the correct one
                trigger_start = _trigger[1] + 1
                trigger_end = trigger_start + len(_trigger[0]) - 1

                for i in range(trigger_start, trigger_end + 1):
                    role_inputs['token_type_ids'][0][i] = 1

                tmp_trigger_label = torch.tensor(
                    [[trigger_start, trigger_end]]).long()

                role_inputs['trigger_index'] = tmp_trigger_label

                trigger_distance = [511] * (len(tmp_text) + 2)
                for i in range(len(tmp_text)):
                    if trigger_start <= i <= trigger_end:
                        trigger_distance[i] = 0
                        continue
                    elif i < trigger_start:
                        trigger_distance[i] = trigger_start - i
                    else:
                        trigger_distance[i] = i - trigger_end

                if opt.role1_use_trigger_distance or opt.role2_use_trigger_distance:
                    role_inputs['trigger_distance'] = torch.tensor(
                        [trigger_distance]).long()

                for key in role_inputs.keys():
                    role_inputs[key] = role_inputs[key].to(device)

                tmp_roles_pred = role1_model(**role_inputs)[0][0].cpu().numpy()

                tmp_roles_pred = tmp_roles_pred[1:1 + len(tmp_text)]

                pred_obj = pointer_decode(tmp_roles_pred[:, :2], tmp_text,
                                          start_threshold, end_threshold, True)

                pred_sub = pointer_decode(tmp_roles_pred[:, 2:], tmp_text,
                                          start_threshold, end_threshold, True)

                if len(pred_obj) > 1:
                    print(pred_obj)

                if len(pred_sub) > 1:
                    print(pred_sub)

                pred_aux_tokens = role2_model(**role_inputs)[0][0]
                pred_aux = crf_decode(pred_aux_tokens, tmp_text, id2role)

                for _obj in pred_obj:
                    tmp_event['arguments'].append({
                        'role': 'object',
                        'text': _obj[0],
                        'offset': int(_obj[1]),
                        'length': len(_obj[0])
                    })
                for _sub in pred_sub:
                    tmp_event['arguments'].append({
                        'role': 'subject',
                        'text': _sub[0],
                        'offset': int(_sub[1]),
                        'length': len(_sub[0])
                    })

                for _role_type in pred_aux.keys():
                    for _role in pred_aux[_role_type]:
                        tmp_event['arguments'].append({
                            'role': _role_type,
                            'text': _role[0],
                            'offset': int(_role[1]),
                            'length': len(_role[0])
                        })

                att_inputs = copy.deepcopy(tmp_base_inputs)

                att_inputs['trigger_index'] = tmp_trigger_label

                window_size = 20

                pooling_masks_range = range(
                    max(1, trigger_start - window_size),
                    min(1 + len(tmp_text), 511, trigger_end + window_size))

                pooling_masks = [0] * (2 + len(tmp_text))
                for i in pooling_masks_range:
                    pooling_masks[i] = 1
                for i in range(trigger_start, trigger_end + 1):
                    pooling_masks[i] = 0

                att_inputs['pooling_masks'] = torch.tensor(
                    [pooling_masks]).float()

                for key in att_inputs.keys():
                    att_inputs[key] = att_inputs[key].to(device)

                polarity_logits, tense_logits = attribution_model(**att_inputs)

                polarity_logits = polarity_logits[0].cpu().numpy()
                tense_logits = tense_logits[0].cpu().numpy()

                tense = id2tense[np.argmax(tense_logits)]
                polarity = id2polarity[np.argmax(polarity_logits)]

                tmp_event['polarity'] = polarity
                tmp_event['tense'] = tense

                events.append(tmp_event)

            tmp_instance['events'] = events
            submit.append(tmp_instance)

    submit, nums = clean_data(submit)

    print(f'Clean {nums} data')
    with open(os.path.join(opt.submit_dir, f'submit_{opt.version}.json'),
              'w',
              encoding='utf-8') as f:
        json.dump(submit, f, ensure_ascii=False, indent=2)

    logger.info(f'{counts} blank examples')
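
pointer_decode and pointer_trigger_decode are imported from the project's utilities and not shown here. Purely as an illustration of threshold-based pointer decoding, consistent with how pointer_decode is called above (a (seq_len, 2) array of start/end probabilities in, a list of (span_text, offset) pairs out), a hypothetical sketch:

import numpy as np

def pointer_decode_sketch(logits, text, start_threshold, end_threshold):
    # logits[:, 0] holds start probabilities, logits[:, 1] end probabilities
    spans = []
    start_ids = np.where(logits[:, 0] > start_threshold)[0]
    end_ids = np.where(logits[:, 1] > end_threshold)[0]
    for s in start_ids:
        # pair each start with the nearest end at or after it
        candidates = end_ids[end_ids >= s]
        if len(candidates):
            e = candidates[0]
            spans.append((text[s:e + 1], int(s)))
    return spans
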
Example #5
def evaluate(opt):
    processors = {
        'trigger': TriggerProcessor,
        'role1': RoleProcessor,
        'role2': RoleProcessor,
        'attribution': AttributionProcessor
    }

    processor = processors[opt.task_type]()

    info_dict = prepare_info(opt.task_type, opt.mid_data_dir)

    feature_para, dataset_para, model_para = prepare_para_dict(opt, info_dict)

    dev_raw_examples = processor.read_json(
        os.path.join(opt.raw_data_dir, 'dev.json'))

    dev_examples, dev_callback_info = processor.get_dev_examples(
        dev_raw_examples)

    dev_features = convert_examples_to_features(opt.task_type, dev_examples,
                                                opt.bert_dir, opt.max_seq_len,
                                                **feature_para)

    logger.info(f'Build {len(dev_features)} dev features')

    dev_dataset = build_dataset(opt.task_type,
                                dev_features,
                                mode='dev',
                                **dataset_para)

    dev_loader = DataLoader(dev_dataset,
                            batch_size=opt.eval_batch_size,
                            shuffle=False,
                            num_workers=8)

    dev_info = (dev_loader, dev_callback_info)

    model = build_model(opt.task_type, opt.bert_dir, **model_para)

    model_path_list = get_model_path_list(opt.dev_dir)

    metric_str = ''

    max_f1 = 0.
    max_f1_step = 0

    for idx, model_path in enumerate(model_path_list):

        tmp_step = model_path.split('/')[-2].split('-')[-1]

        model, device = load_model_and_parallel(model,
                                                opt.gpu_ids[0],
                                                ckpt_path=model_path)

        if opt.task_type == 'trigger':

            tmp_metric_str, tmp_f1 = trigger_evaluation(
                model,
                dev_info,
                device,
                start_threshold=opt.start_threshold,
                end_threshold=opt.end_threshold)

        elif opt.task_type == 'role1':
            tmp_metric_str, tmp_f1 = role1_evaluation(
                model,
                dev_info,
                device,
                start_threshold=opt.start_threshold,
                end_threshold=opt.end_threshold)
        elif opt.task_type == 'role2':
            tmp_metric_str, tmp_f1 = role2_evaluation(model, dev_info, device)
        else:
            tmp_metric_str, tmp_f1 = attribution_evaluation(
                model,
                dev_info,
                device,
                polarity2id=info_dict['polarity2id'],
                tense2id=info_dict['tense2id'])

        logger.info(f'In step {tmp_step}:\n{tmp_metric_str}')

        metric_str += f'In step {tmp_step}:\n{tmp_metric_str}\n\n'

        if tmp_f1 > max_f1:
            max_f1 = tmp_f1
            max_f1_step = tmp_step

    max_metric_str = f'Max f1 is: {max_f1}, in step {max_f1_step}\n'

    logger.info(max_metric_str)

    metric_str += max_metric_str + '\n'

    eval_save_path = os.path.join(opt.dev_dir, 'eval_metric.txt')

    with open(eval_save_path, 'a', encoding='utf-8') as f:
        f.write(metric_str)
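
get_model_path_list is not shown in these examples. Given that the step is later parsed as model_path.split('/')[-2].split('-')[-1], a plausible implementation (an assumption, not the project's code) collects checkpoints named checkpoint-<step> and sorts them by step:

import glob
import os

def get_model_path_list_sketch(base_dir):
    # assumed layout: <base_dir>/checkpoint-<step>/model.pt
    paths = glob.glob(os.path.join(base_dir, 'checkpoint-*', 'model.pt'))
    step_of = lambda p: int(os.path.basename(os.path.dirname(p)).split('-')[-1])
    return sorted(paths, key=step_of)
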
Example #6
def training(opt):
    processors = {
        'oce': (OceProcessor, 'OCEMOTION'),
        'tnews': (TnewsProcessor, 'TNEWS'),
        'ocnli': (OcnliProcessor, 'OCNLI')
    }
    devs = []
    model = build_model(opt.bert_dir)
    out_dir_base = opt.output_dir

    if opt.mode == 'train':

        total_examples = []
        for task in processors.keys():
            logger.info(f'############## Task : {task} load ##############')
            processor = processors[task][0]()
            file_train = processors[task][1] + '_train_s.csv'
            train_raw_examples = processor.load_raw(
                os.path.join(opt.raw_data_dir, file_train))
            train_examples = processor.get_train_examples(train_raw_examples)
            total_examples += train_examples
        train_features = convert_examples_to_features('total', total_examples,
                                                      opt.bert_dir,
                                                      opt.max_seq_len)
        logger.info(f'Build {len(train_features)} train features')
        train_dataset = build_dataset('total', train_features, 'train')

        opt.output_dir = os.path.join(out_dir_base, opt.bert_type, 'total')
        if not os.path.exists(opt.output_dir):
            os.makedirs(opt.output_dir, exist_ok=True)
        train('total', opt, model, train_dataset)

    if opt.eval_model:
        for task in processors.keys():
            logger.info(f'############## Task : {task} load ##############')

            file_dev = processors[task][1] + '_dev.csv'
            processor = processors[task][0]()

            dev_raw_examples = processor.load_raw(
                os.path.join(opt.raw_data_dir, file_dev))
            dev_examples = processor.get_dev_examples(dev_raw_examples)
            dev_features = convert_examples_to_features(
                task, dev_examples, opt.bert_dir, opt.max_seq_len)
            dev_dataset = build_dataset(task, dev_features, 'dev')
            dev_loader = DataLoader(dev_dataset,
                                    batch_size=opt.eval_batch_size,
                                    shuffle=False,
                                    num_workers=0)
            model_path_list = get_model_path_list(opt.output_dir)

            for idx, model_path in enumerate(model_path_list):
                tmp_step = model_path.split('/')[-2].split('-')[-1]
                model, device = load_model_and_parallel(model,
                                                        opt.gpu_ids[0],
                                                        ckpt_path=model_path)
                pred_logits = None
                labels = None
                for tmp_pred in get_base_out(task, model, dev_loader, device):
                    pred = tmp_pred[0].cpu().numpy()
                    label = tmp_pred[1].cpu().numpy()

                    if pred_logits is None:
                        pred_logits = pred
                    else:
                        pred_logits = np.append(pred_logits, pred, axis=0)

                    if labels is None:
                        labels = label
                    else:
                        labels = np.append(labels, label, axis=0)

                preds = np.argmax(pred_logits, -1)
                macro_f1_score = f1_score(labels, preds, average='macro')
                logger.info(f'\nmacro f1 at {model_path} is {macro_f1_score}')
                logger.info('\nconfusion_matrix:')
                cm_res = confusion_matrix(labels, preds)
                logger.info(f'{cm_res}')
                logger.info(f'{classification_report(labels, preds)}')
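
For reference, the sklearn metrics used in the eval branch can be exercised on toy inputs (illustrative values only, not project data):

import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, f1_score

labels = np.array([0, 1, 2, 1, 0, 2])
preds = np.array([0, 1, 1, 1, 0, 2])

print(f1_score(labels, preds, average='macro'))  # unweighted mean of per-class F1
print(confusion_matrix(labels, preds))           # rows = true class, columns = predicted
print(classification_report(labels, preds))      # per-class precision/recall/F1 table
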