Code Example #1
    def __init__(self, model, config, **kwargs):
        super().__init__(kwargs['dataset'],
                         model,
                         kwargs['embedding'],
                         kwargs['data_loader'],
                         batch_size=config['batch_size'],
                         device=config['device'])

        if config['model'] in {
                'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'
        }:
            variant = ('bert-large-uncased' if config['model'] == 'BERT-Large'
                       else 'bert-base-uncased')
            self.tokenizer = BertTokenizer.from_pretrained(
                variant, is_lowercase=config['is_lowercase'])
            self.processor = kwargs['processor']
            if config['split'] == 'test':
                self.eval_examples = self.processor.get_test_examples(
                    config['data_dir'], topic=config['topic'])
            else:
                self.eval_examples = self.processor.get_dev_examples(
                    config['data_dir'], topic=config['topic'])

        self.config = config
        self.ignore_lengths = config['ignore_lengths']
        self.y_target = None
        self.y_pred = None
        self.docid = None
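
One detail worth noting in the constructor above: the ternary maps every model name except 'BERT-Large' to 'bert-base-uncased', including 'HBERT-Large', which may or may not be intended. A quick, self-contained check of that selection logic:

for m in ['BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large']:
    variant = 'bert-large-uncased' if m == 'BERT-Large' else 'bert-base-uncased'
    print(m, '->', variant)
# BERT-Base -> bert-base-uncased
# BERT-Large -> bert-large-uncased
# HBERT-Base -> bert-base-uncased
# HBERT-Large -> bert-base-uncased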
Code Example #2
    def __init__(self, model, config, **kwargs):
        super().__init__(model, kwargs['embedding'], kwargs['train_loader'],
                         config, None, kwargs['test_evaluator'],
                         kwargs['dev_evaluator'])

        if config['model'] in {
                'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'
        }:
            variant = ('bert-large-uncased' if config['model'] == 'BERT-Large'
                       else 'bert-base-uncased')
            self.tokenizer = BertTokenizer.from_pretrained(
                variant, is_lowercase=config['is_lowercase'])
            self.processor = kwargs['processor']
            self.optimizer = config['optimizer']
            self.train_examples = self.processor.get_train_examples(
                config['data_dir'], topic=config['topic'])
            self.num_train_optimization_steps = int(
                len(self.train_examples) / config['batch_size'] /
                config['gradient_accumulation_steps']) * config['epochs']
        self.config = config
        self.early_stop = False
        self.best_dev_ap = 0
        self.iterations = 0
        self.unimproved_iters = 0

        self.log_header = 'Epoch Iteration Progress   Dev/Acc.  Dev/Pr.  Dev/AP.   Dev/F1   Dev/Loss'
        self.log_template = ' '.join(
            '{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'
            .split(','))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        self.snapshot_path = os.path.join(self.model_outfile,
                                          config['dataset'].NAME,
                                          '%s.pt' % timestamp)
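
The optimization-step arithmetic above is worth making concrete: steps per epoch are floored before multiplying by epochs, so a partial final accumulation window is silently dropped. With purely illustrative numbers:

# Illustrative numbers only; mirrors the formula in the constructor above.
train_examples, batch_size, grad_accum, epochs = 10_000, 16, 2, 5
steps = int(train_examples / batch_size / grad_accum) * epochs
print(steps)  # int(312.5) * 5 = 1560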
Code Example #3
    def __init__(self, model, optimizer, processor, args):
        self.args = args
        self.model = model
        self.optimizer = optimizer
        self.processor = processor
        self.train_examples = self.processor.get_train_examples(args.data_dir)
        self.tokenizer = BertTokenizer.from_pretrained(
            args.model, is_lowercase=args.is_lowercase)

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        self.snapshot_path = os.path.join(self.args.save_path,
                                          self.processor.NAME,
                                          '%s.pt' % timestamp)

        self.num_train_optimization_steps = int(
            len(self.train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            self.num_train_optimization_steps = (
                self.num_train_optimization_steps //
                torch.distributed.get_world_size())
        self.log_header = 'Epoch Iteration Progress   Dev/Acc.  Dev/Hamm.  Dev/Jacc.   Dev/Prec Dev/Rec Dev/micro-F1 Dev/F1  Dev/Loss'
        self.log_template = ' '.join(
            '{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'
            .split(','))

        self.iterations, self.nb_tr_steps, self.tr_loss = 0, 0, 0
        self.best_dev_f1, self.unimproved_iters = 0, 0
        self.early_stop = False
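
The ' '.join(spec.split(',')) idiom used for log_template simply rewrites the comma-delimited spec as a space-separated format string, which keeps the long field spec readable. A shortened, runnable demonstration:

log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f},{:>6.4f}'.split(','))
print(log_template)  # '{:>5.0f} {:>9.0f} {:>6.0f}/{:<5.0f} {:>6.4f}'
print(log_template.format(1, 200, 50, 100, 0.8765))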
Code Example #4
    def __init__(self, model, processor, args, split='dev'):
        self.args = args
        self.model = model
        self.processor = processor
        self.tokenizer = BertTokenizer.from_pretrained(args.model,
                                                       is_lowercase=args.is_lowercase)
        if split == 'test':
            self.eval_examples = self.processor.get_test_examples(args.data_dir)
        else:
            self.eval_examples = self.processor.get_dev_examples(args.data_dir)
Code Example #5
def load_text(texts: List[str]):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    max_pos = 512
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                              do_lower_case=True)
    sep_vid = tokenizer.vocab['[SEP]']
    cls_vid = tokenizer.vocab['[CLS]']

    n_lines = len(texts)

    def _process_src(raw):
        raw = raw.strip().lower()
        src_subtokens = tokenizer.tokenize(raw)
        src_subtokens = ['[CLS]'] + src_subtokens + ['[SEP]']
        src_subtoken_idxs = tokenizer.convert_tokens_to_ids(src_subtokens)
        # truncate to max_pos and force the final token back to [SEP]
        src_subtoken_idxs = src_subtoken_idxs[:-1][:max_pos]
        src_subtoken_idxs[-1] = sep_vid
        # span lengths between consecutive [SEP] tokens
        _segs = [-1] + \
            [i for i, t in enumerate(src_subtoken_idxs) if t == sep_vid]
        segs = [_segs[i] - _segs[i - 1] for i in range(1, len(_segs))]
        # BERT-style alternating 0/1 segment ids, one run per sentence span
        segments_ids = []
        segs = segs[:max_pos]
        for i, s in enumerate(segs):
            if i % 2 == 0:
                segments_ids += s * [0]
            else:
                segments_ids += s * [1]

        src = torch.tensor(src_subtoken_idxs)[None, :].to(device)
        mask_src = (1 - (src == 0).float()).to(device)
        cls_ids = [[
            i for i, t in enumerate(src_subtoken_idxs) if t == cls_vid
        ]]
        clss = torch.tensor(cls_ids).to(device)
        mask_cls = 1 - (clss == -1).float()
        clss[clss == -1] = 0

        return src, mask_src, segments_ids, clss, mask_cls

    for x in tqdm(texts, total=n_lines):
        src, mask_src, segments_ids, clss, mask_cls = _process_src(x)
        segs = torch.tensor(segments_ids)[None, :].to(device)
        batch = Batch()
        batch.src = src
        batch.tgt = None
        batch.mask_src = mask_src
        batch.mask_tgt = None
        batch.segs = segs
        batch.src_str = [[
            sent.replace('[SEP]', '').strip() for sent in x.split('[CLS]')
        ]]
        batch.tgt_str = ['']
        batch.clss = clss
        batch.mask_cls = mask_cls

        batch.batch_size = 1
        yield batch
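
The alternating segment-id scheme inside _process_src is the subtlest part of this example. A toy run with hand-made token ids (102 standing in for the [SEP] id, purely for illustration) shows what it produces:

sep_vid = 102                             # assumed [SEP] id, illustration only
ids = [101, 7, 8, 102, 9, 10, 11, 102]    # [CLS] a b [SEP] c d e [SEP]
_segs = [-1] + [i for i, t in enumerate(ids) if t == sep_vid]
segs = [_segs[i] - _segs[i - 1] for i in range(1, len(_segs))]
segments_ids = []
for i, s in enumerate(segs):
    segments_ids += s * [i % 2]           # alternate 0/1 per sentence span
print(segments_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]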
Code Example #6
    def __init__(self, model, processor, args, split='dev'):
        self.args = args
        self.model = model
        self.processor = processor
        self.tokenizer = BertTokenizer.from_pretrained(args.model,
                                                       is_lowercase=args.is_lowercase)
        self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
        if split == 'test':
            self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
        elif split == 'dev':
            self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
        else:
            self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
Code Example #7
def pretrain_model():
    """MLM & NSP pretraining.

    You can modify this function yourself; it does not affect your final score.
    """
    print("======MLM & NSP Pretraining======")
    tokenizer = BertTokenizer(args.vocab_file, max_len=args.max_seq_length)
    comment_train_dataset = ParagraphDataset(
        os.path.join('data', 'korean-hate-speech', 'unlabeled'))
    title_train_dataset = ParagraphDataset(
        os.path.join('data', 'korean-hate-speech', 'news_title'))
    train_dataset = PretrainDataset(args.max_seq_length, comment_train_dataset,
                                    title_train_dataset, tokenizer)
    comment_val_dataset = ParagraphDataset(
        os.path.join('data', 'korean-hate-speech', 'unlabeled', 'dev'))
    title_val_dataset = ParagraphDataset(
        os.path.join('data', 'korean-hate-speech', 'news_title', 'dev'))
    val_dataset = PretrainDataset(args.max_seq_length, comment_val_dataset,
                                  title_val_dataset, tokenizer)
    config = Config(args.config_file)
    device = torch.device('cpu')
    model = MlmNspModel(config).to(device)
    model.bert.load_state_dict(torch.load(args.checkpoint,
                                          map_location=device))

    model_name = 'pretrained'

    MLM_train_losses, MLM_val_losses, NSP_train_losses, NSP_val_losses \
            = pretraining(model, model_name, train_dataset, val_dataset)

    torch.save(model.state_dict(), model_name + '_final.pth')

    with open(model_name + '_result.pkl', 'wb') as f:
        pickle.dump((MLM_train_losses, MLM_val_losses, NSP_train_losses,
                     NSP_val_losses), f)

    utils_pretrain.plot_values(MLM_train_losses,
                               MLM_val_losses,
                               title=model_name + "_mlm")
    utils_pretrain.plot_values(NSP_train_losses,
                               NSP_val_losses,
                               title=model_name + "_nsp")

    print("Final MLM training loss: {:06.4f}".format(MLM_train_losses[-1]))
    print("Final MLM validation loss: {:06.4f}".format(MLM_val_losses[-1]))
    print("Final NSP training loss: {:06.4f}".format(NSP_train_losses[-1]))
    print("Final NSP validation loss: {:06.4f}".format(NSP_val_losses[-1]))
Code Example #8
File: main.py Project: jiachenwestlake/Entity_BERT
def decode(hp):
    tokenizer = BertTokenizer.from_pretrained(hp.bert_model_dir, do_lower_case=False)
    if hp.dataset == "lm_raw_data_finance":
        dict_file = "../data/dataset_finance/annual_report_entity_list"
    elif hp.dataset == "lm_raw_data_novel":
        dict_file = "../data/dataset_book9/entity_book9"
    else:
        raise ValueError("Unknown dataset: %s" % hp.dataset)
    entity_dict = EntityDict(hp.dataset, dict_file)
    entity_dict.load(os.path.join(hp.bert_model_dir, 'entity.dict'))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    ner_label = NerLabel([hp.decodeset])
    if os.path.exists(os.path.join(hp.logdir, 'dict.pt')):
        ner_label.load(os.path.join(hp.logdir, 'dict.pt'))
    else:
        print('dict.pt does not exist')
        exit()

    decode_dataset = NerDataset(hp.decodeset, ner_label, tokenizer, entity_dict)

    model = Net(hp.bert_model_dir, hp.top_rnns, len(ner_label.VOCAB), entity_dict.entity_num, device, hp.finetuning).to(device)
    model = nn.DataParallel(model)
    ## Load the model parameters
    if os.path.exists(os.path.join(hp.logdir, 'model.pt')):
        model.load_state_dict(torch.load(os.path.join(hp.logdir, 'model.pt')))
    else:
        print("the pretrianed model path does not exist! ")
        exit()

    decode_iter = data.DataLoader(dataset=decode_dataset,
                                  batch_size=hp.batch_size,
                                  shuffle=True,  # note: shuffling scrambles the decode output order
                                  num_workers=4,
                                  collate_fn=pad)

    fname = os.path.join(hp.logdir, '_')

    precision, recall, f1 = evaluate(model, decode_iter, fname, ner_label, verbose=False)
Code Example #9
def main():
    parser = argparse.ArgumentParser()
    # Parameters
    parser.add_argument(
        "--output_dir",
        default='output',
        type=str,
        help=
        "The output directory where the model checkpoints and predictions will be written."
    )
    parser.add_argument("--checkpoint",
                        default='large_v1/large_v1_model.bin',
                        type=str,
                        help="pre-trained model checkpoint")
    parser.add_argument("--config_file",
                        default='large_v1/large_config.json',
                        type=str,
                        help="model configuration file")
    parser.add_argument("--vocab_file",
                        default='large_v1/large_v1_32k_vocab.txt',
                        type=str,
                        help="tokenizer vocab file")
    parser.add_argument("--train_file",
                        default='data/korquad/KorQuAD_v1.0_train.json',
                        type=str,
                        help="SQuAD json for training. E.g., train-v1.1.json")

    parser.add_argument("--test",
                        default=False,
                        type=bool,
                        help="Test by sample")
    parser.add_argument("--train_data_limit",
                        default=-1,
                        type=int,
                        help="For Check EDA performance limit train data")
    ######ADD for EDA#######
    parser.add_argument(
        "--same_random_index",
        default=True,
        type=bool,
        help="for the EDA data limit; if True, every EDA pass picks the same random indices")
    parser.add_argument("--eda_type",
                        default="no_eda",
                        type=str,
                        help="EDA type, e.g., sr, ri, rs, rd")
    parser.add_argument("--alpha", default=0.1, type=float, help="For eda")
    parser.add_argument("--num_aug", default=2, type=int, help="For eda")
    parser.add_argument("--eda_model_name",
                        default="word2vec",
                        type=str,
                        help="For eda")
    parser.add_argument("--min_score", default=0.7, type=float, help="For eda")
    parser.add_argument("--dict_file_name",
                        default="data/syn/synonyms_file.pickle",
                        type=str)
    ########################

    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    )
    parser.add_argument(
        "--doc_stride",
        default=128,
        type=int,
        help=
        "When splitting up a long document into chunks, how much stride to take between chunks."
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help=
        "The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.")
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help=
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.")
    parser.add_argument("--per_gpu_train_batch_size",
                        default=4,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument(
        '--gradient_accumulation_steps',
        default=1,
        type=int,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.01,
                        type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--num_train_epochs",
                        default=4.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--adam_epsilon",
                        default=1e-6,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
        "of training.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help=
        "The total number of n-best predictions to generate in the nbest_predictions.json "
        "output file.")
    parser.add_argument(
        "--verbose_logging",
        action='store_true',
        help=
        "If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")

    parser.add_argument('--seed',
                        default=42,
                        type=int,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--fp16_opt_level',
        default='O2',
        type=str,
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank",
                        default=-1,
                        type=int,
                        help="For distributed training: local_rank")
    parser.add_argument(
        '--null_score_diff_threshold',
        type=float,
        default=0.0,
        help=
        "If null_score - best_non_null is greater than the threshold predict null."
    )

    args = parser.parse_args()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank, device, args.n_gpu, bool(args.local_rank != -1),
        args.fp16)

    # Set seed
    set_seed(args)

    tokenizer = BertTokenizer(args.vocab_file,
                              max_len=args.max_seq_length,
                              do_basic_tokenize=True)

    # Prepare model
    config = Config.from_json_file(args.config_file)
    model = QuestionAnswering(config)
    model.load_state_dict(torch.load(args.checkpoint))
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    model.to(device)

    logger.info("Training hyper-parameters %s", args)

    # Training
    train_dataset = load_and_cache_examples(args, tokenizer)
    train(args, train_dataset, model)
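
set_seed() is referenced but not defined in this snippet; a typical implementation, assuming it mirrors the inline seeding used in the other examples on this page, would be:

import random

import numpy as np
import torch

def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)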
Code Example #10
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--config_file",
                        type=str,
                        required=True,
                        help="model configuration file")
    parser.add_argument("--vocab_file",
                        type=str,
                        required=True,
                        help="tokenizer vocab file")
    parser.add_argument("--checkpoint",
                        default=None,
                        type=str,
                        required=True,
                        help="fine-tuned model checkpoint")
    # Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()
    args.n_gpu = n_gpu

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)

    logger.info("device: {} n_gpu: {}, 16-bits training: {}".format(
        device, n_gpu, args.fp16))

    set_seed(args)

    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer(args.vocab_file, max_len=args.max_seq_length)

    test_examples = processor.get_test_examples(args.data_dir)
    texts = [test_example.text_a for test_example in test_examples]

    # Prepare model
    config = Config(args.config_file)
    model = SequenceClassificationMultitask(config)
    model.load_state_dict(torch.load(args.checkpoint))
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    model.to(device)
    if args.fp16:
        model = model.half()

    test_features = convert_examples_to_features(
        test_examples, label_list, args.max_seq_length, tokenizer, output_mode)
    logger.info("***** Running Evaluation *****")
    logger.info("  Num examples = %d", len(test_examples))
    logger.info("  Batch size = %d", args.eval_batch_size)
    all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)

    if output_mode == "classification_multitask":
        all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.float)

    test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)

    model.eval()
    preds = []
    preds0 = []
    preds1 = []
    preds2 = []
    for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)

        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask, labels=None)

        if output_mode == "classification_multitask":
            if len(preds0) == 0:
                preds0.append(logits[0].detach().cpu().numpy())
                preds1.append(logits[1].detach().cpu().numpy())
                preds2.append(logits[2].detach().cpu().numpy())
            else:
                preds0[0] = np.append(preds0[0], logits[0].detach().cpu().numpy(), axis=0)
                preds1[0] = np.append(preds1[0], logits[1].detach().cpu().numpy(), axis=0)
                preds2[0] = np.append(preds2[0], logits[2].detach().cpu().numpy(), axis=0)
        else:
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
            else:
                preds[0] = np.append(
                    preds[0], logits.detach().cpu().numpy(), axis=0)

    if output_mode == "classification_multitask":
        preds0 = preds0[0]
        preds1 = preds1[0]
        preds2 = preds2[0]

        preds0 = np.argmax(preds0, axis=1)
        preds1 = np.argmax(preds1, axis=1)
        preds2 = np.argmax(preds2, axis=1)
        preds = [preds0, preds1, preds2]
        label = []
        for p in preds0:
            if p == 0:
                label.append('True')
            elif p == 1:
                label.append('False')

    elif output_mode == "classification":
        preds = preds[0]
        preds = np.argmax(preds, axis=1)
    elif output_mode == "regression":
        preds = preds[0]
        preds = np.squeeze(preds)

    # TODO: write the csv
    # result = compute_metrics(task_name, preds, all_label_ids.numpy())
    data = {'comments': texts,
            'label': label}
    data_df = DataFrame(data)
    csv_write_path = '/mnt/sdc1/korLM/by_me/out/by_me_9_gender_0608.csv'
    # data_df.to_csv(csv_write_path, sep=',',index=False)
    with open(csv_write_path, 'w', encoding='utf-8') as f:
        wr = csv.writer(f)
        wr.writerow(['comments','label'])
        for text, lab in zip(texts, label):
            wr.writerow([text, lab])


    logger.info("***** Eval results *****")
Code Example #11
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {
        'SST-2': SST2Processor,
        'Reuters': ReutersProcessor,
        'IMDB': IMDBProcessor,
        'AAPD': AAPDProcessor,
        'AGNews': AGNewsProcessor,
        'Yelp2014': Yelp2014Processor,
        'Sogou': SogouProcessor,
        'Personality': PersonalityProcessor,
        'News_art': News_artProcessor,
        'News': News_Processor,
        'UCI_yelp': UCI_yelpProcessor,
        'Procon': ProconProcessor,
        'Style': StyleProcessor,
        'ProconDual': ProconDualProcessor,
        'Pan15': Pan15_Processor,
        'Pan14E': Pan14E_Processor,
        'Pan14N': Pan14N_Processor,
        'Perspectrum': PerspectrumProcessor
    }

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = (num_train_optimization_steps //
                                            torch.distributed.get_world_size())

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Install NVIDIA Apex to use distributed and FP16 training.")
        model = DDP(model)
    # elif n_gpu > 1:  # changed by marjan
    #     model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install NVIDIA Apex for distributed and FP16 training")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.lr,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)
    else:
        model = BertForSequenceClassification.from_pretrained(
            args.model, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)
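
The no_decay filter in the optimizer setup is a plain substring match; a self-contained toy run shows how parameter names get routed into the two weight-decay groups:

names = [
    'bert.encoder.layer.0.attention.self.query.weight',
    'bert.encoder.layer.0.attention.self.query.bias',
    'bert.encoder.layer.0.attention.output.LayerNorm.weight',
]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
print([n for n in names if not any(nd in n for nd in no_decay)])  # decayed (0.01)
print([n for n in names if any(nd in n for nd in no_decay)])      # exempt (0.0)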
Code Example #12
File: __main__.py Project: vrmpx/hedwig
    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = True
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = (num_train_optimization_steps //
                                            torch.distributed.get_world_size())

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
Code Example #13
def main():
    parser = argparse.ArgumentParser()
    # Required parameters

    parser.add_argument("--output_dir", default='output', type=str,
                        help="The output directory where the model checkpoints and predictions will be written.")
    parser.add_argument("--checkpoint", default='pretrain_ckpt/bert_small_ckpt.bin',
                        type=str,
                        help="checkpoint")
    parser.add_argument("--model_config", default='data/bert_small.json',
                        type=str)
    # Other parameters
    parser.add_argument("--train_file", default='data/KorQuAD_v1.0_train.json', type=str,
                        help="SQuAD json for training. E.g., train-v1.1.json")
    parser.add_argument("--max_seq_length", default=512, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--doc_stride", default=128, type=int,
                        help="When splitting up a long document into chunks, how much stride to take between chunks.")
    parser.add_argument("--max_query_length", default=96, type=int,
                        help="The maximum number of tokens for the question. Questions longer than this will "
                             "be truncated to this length.")
    parser.add_argument("--train_batch_size", default=16, type=int, help="Total batch size for training.")
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=4.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
                             "of training.")
    parser.add_argument("--n_best_size", default=20, type=int,
                        help="The total number of n-best predictions to generate in the nbest_predictions.json "
                             "output file.")
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="The maximum length of an answer that can be generated. This is needed because the start "
                             "and end predictions are not conditioned on one another.")
    parser.add_argument("--verbose_logging", action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. "
                             "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O2',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--null_score_diff_threshold',
                        type=float, default=0.0,
                        help="If null_score - best_non_null is greater than the threshold predict null.")

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}, 16-bits training: {}".format(
        device, n_gpu, args.fp16))

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer('ko_vocab_32k.txt',
                              max_len=args.max_seq_length,
                              do_basic_tokenize=True)
    # Prepare model
    config = Config.from_json_file(args.model_config)
    model = QuestionAnswering(config)
    model.bert.load_state_dict(torch.load(args.checkpoint))
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    model.to(device)

    cached_train_features_file = args.train_file + '_{0}_{1}_{2}'.format(str(args.max_seq_length), str(args.doc_stride),
                                                                         str(args.max_query_length))
    train_examples = read_squad_examples(input_file=args.train_file, is_training=True, version_2_with_negative=False)
    try:
        with open(cached_train_features_file, "rb") as reader:
            train_features = pickle.load(reader)
    except (IOError, EOFError, pickle.UnpicklingError):
        # no usable cache: rebuild the features from the raw examples
        train_features = convert_examples_to_features(
            examples=train_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=True)
        logger.info("  Saving train features into cached file %s", cached_train_features_file)
        with open(cached_train_features_file, "wb") as writer:
            pickle.dump(train_features, writer)

    num_train_optimization_steps = int(len(train_features) / args.train_batch_size) * args.num_train_epochs

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer,
                                     warmup_steps=num_train_optimization_steps*0.1,
                                     t_total=num_train_optimization_steps)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    logger.info("***** Running training *****")
    logger.info("  Num orig examples = %d", len(train_examples))
    logger.info("  Num split examples = %d", len(train_features))
    logger.info("  Batch size = %d", args.train_batch_size)
    logger.info("  Num steps = %d", num_train_optimization_steps)
    num_train_step = num_train_optimization_steps

    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
    all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                               all_start_positions, all_end_positions)

    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    model.train()
    global_step = 0
    epoch = 0
    for _ in trange(int(args.num_train_epochs), desc="Epoch"):
        iter_bar = tqdm(train_dataloader, desc="Train(XX Epoch) Step(XX/XX) (Mean loss=X.X) (loss=X.X)")
        tr_step, total_loss, mean_loss = 0, 0., 0.
        for step, batch in enumerate(iter_bar):
            if n_gpu == 1:
                batch = tuple(t.to(device) for t in batch)  # multi-gpu does the scattering itself
            input_ids, input_mask, segment_ids, start_positions, end_positions = batch
            loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            optimizer.step()
            scheduler.step()  # step the LR schedule after the optimizer update
            optimizer.zero_grad()
            global_step += 1
            tr_step += 1
            total_loss += loss.item()  # accumulate a float, not a graph-attached tensor
            mean_loss = total_loss / tr_step
            iter_bar.set_description("Train Step(%d / %d) (Mean loss=%5.5f) (loss=%5.5f)" %
                                     (global_step, num_train_step, mean_loss, loss.item()))

        logger.info("** ** * Saving file * ** **")
        model_checkpoint = "korquad_%d.bin" % (epoch)
        logger.info(model_checkpoint)
        output_model_file = os.path.join(args.output_dir, model_checkpoint)
        if n_gpu > 1:
            torch.save(model.module.state_dict(), output_model_file)
        else:
            torch.save(model.state_dict(), output_model_file)
        epoch += 1
Code Example #14
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_corpus",
                        default=None,
                        type=str,
                        required=True,
                        help="The input train corpus.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--on_memory",
                        action='store_true',
                        help="Whether to load train samples into memory or use disk")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    #train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        print("Loading Train Dataset", args.train_corpus)
        train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length,
                                    corpus_lines=None, on_memory=args.on_memory)
        num_train_optimization_steps = int(
            len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                                 t_total=num_train_optimization_steps)

        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=num_train_optimization_steps)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            #TODO: check if this works with current data generator from disk that relies on next(file)
            # (it doesn't return item back by index)
            train_sampler = DistributedSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

        # Save a trained model
        logger.info("** ** * Saving fine - tuned model ** ** * ")
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        if args.do_train:
            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(args.output_dir)
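
Because the weights, config, and vocabulary are saved under the library's standard WEIGHTS_NAME/CONFIG_NAME file names, the output directory can later be reloaded directly with from_pretrained. A minimal sketch, assuming the same pytorch_pretrained_bert version that produced the files:

model = BertForPreTraining.from_pretrained(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.output_dir,
                                          do_lower_case=args.do_lower_case)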
Code Example #15
File: classify.py Project: marjanhs/procon20
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
import math
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# String templates for logging results
LOG_HEADER = 'Split  Dev/Acc.  Dev/Pr.  Dev/Re.   Dev/F1'
LOG_TEMPLATE = ' '.join('{:>5s},{:>9.4f},{:>8.4f},{:8.4f},{:8.4f}'.split(','))

bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                               is_lowercase=True)


def get_feature_vector(evaluators,
                       use_idf=False,
                       ngram_range=(1, 1),
                       max_seq_len=256):

    train_ev, dev_ev, test_ev = evaluators
    train_text, test_text, dev_text = [], [], []
    for i, x in enumerate(train_ev.eval_examples):
        #tokens_a = x.text_a.strip().split()
        tokens_a = [t for t in x.text_a.strip().split() if t not in ['', ' ']]
        #tokens_a = bert_tokenizer.tokenize(x.text_a)
        tokens_a = tokens_a[:max_seq_len]
        train_text.append(' '.join(tokens_a))
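The function is cut off here. As a hedged sketch of how such whitespace-tokenized text typically feeds the scikit-learn stack imported above (the pipeline below, including the function name and percentile value, is illustrative and not from the original file):

def classify_tfidf(train_text, train_labels, test_text,
                   use_idf=False, ngram_range=(1, 1)):
    # illustrative: TF-IDF features -> chi2 feature selection -> linear SVM
    clf = Pipeline([
        ('tfidf', TfidfVectorizer(use_idf=use_idf, ngram_range=ngram_range)),
        ('select', SelectPercentile(chi2, percentile=50)),
        ('svm', SVC(kernel='linear')),
    ])
    clf.fit(train_text, train_labels)
    return clf.predict(test_text)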
Code example #16
def main(input, output):
    max_seq_length = 512
    doc_stride = 64
    max_query_length = 64
    batch_size = 16
    n_best_size = 20
    max_answer_length = 30
    seed = 42
    fp16 = False

    device = torch.device("cpu")
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)

    tokenizer = BertTokenizer(vocab_file=VOCAB_PATH,
                              max_len=max_seq_length,
                              do_basic_tokenize=True)
    eval_examples = read_squad_examples(input_file=input,
                                        is_training=False,
                                        version_2_with_negative=False)
    eval_features = convert_examples_to_features(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=max_seq_length,
        doc_stride=doc_stride,
        max_query_length=max_query_length,
        is_training=False)

    # Prepare model
    config = Config.from_json_file(CONFIG_PATH)
    model = QuestionAnswering(config)
    if fp16:
        model.half()
    model.load_state_dict(torch.load(CHK_PATH, map_location=device))
    model.to(device)
    logger.info("***** Running training *****")
    logger.info("  Num orig examples = %d", len(eval_examples))
    logger.info("  Num split examples = %d", len(eval_features))
    logger.info("  Batch size = %d", batch_size)

    all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                 dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                  dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                   dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                              all_example_index)

    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=batch_size)

    model.eval()
    all_results = []
    logger.info("Start evaluating")
    for input_ids, input_mask, segment_ids, example_indices in tqdm(
            eval_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(
                input_ids, segment_ids, input_mask)
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            all_results.append(
                RawResult(unique_id=unique_id,
                          start_logits=start_logits,
                          end_logits=end_logits))
    output_nbest_file = "nbest_predictions.json"
    write_predictions(eval_examples, eval_features, all_results, n_best_size,
                      max_answer_length, False, output, output_nbest_file,
                      None, False, False, 0.0)
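A hedged usage sketch for the n-best file written above: each entry maps a question id to candidate answers carrying "text" and "probability" fields, the same structure that code example #24 below iterates over.

import json

with open("nbest_predictions.json") as f:
    nbest = json.load(f)
# pick the highest-ranked non-empty answer text per question
best = {qid: next((o["text"] for o in outputs if o["text"]), "")
        for qid, outputs in nbest.items()}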
Code example #17
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, num_labels=2)  # create the news model
    #model = BertForSequenceClassification.from_pretrained(args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    #model = BertForSequenceClassification.from_pretrained(args.model, num_labels=args.num_labels)
    model_ = torch.load(
        args.trained_model,
        map_location=lambda storage, loc: storage)  # load personality model
    state = {}
    for key in model_.state_dict().keys():
        new_key = key.replace("module.", "")
        state[new_key] = model_.state_dict()[key]

    del state['classifier.weight']  # remove the personality classifier head
    del state['classifier.bias']
    model.load_state_dict(state, strict=False)
    model = model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
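    # parameters whose names match no_decay (biases, LayerNorm) get weight_decay=0.0 below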
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    print('t_total :', num_train_optimization_steps)
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.lr,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
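    # BertAdam applies the warmup-linear LR schedule internally from warmup/t_total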
    args.freez_bert = False
    trainer = BertTrainer(model, optimizer, processor, args)

    trainer.train()
    model = torch.load(trainer.snapshot_path)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)
Code example #18
File: classify.py  Project: marjanhs/procon20
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    model_bert = BertForSequenceClassification.from_pretrained(args.model,
                                                               num_labels=4)

    model_bert.to(device)

    if args.trained_model:
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage
                            )  # load personality model
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]

        del state['classifier.weight']  # remove the personality classifier head
        del state['classifier.bias']
        model_bert.load_state_dict(state, strict=False)
        model_bert = model_bert.to(device)
    args.freez_bert = False
    evaluate(model_bert,
             processor,
             args,
             last_bert_layers=-1,
             ngram_range=(1, 1))
Code example #19
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--config_file",
                        type=str,
                        required=True,
                        help="model configuration file")
    parser.add_argument("--vocab_file",
                        type=str,
                        required=True,
                        help="tokenizer vocab file")
    parser.add_argument("--checkpoint",
                        type=str,
                        required=True,
                        help="pretrained model checkpoint")
    # Other parameters
    parser.add_argument(
        "--output_dir",
        default="output",
        type=str,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=10.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--fp16_opt_level',
        type=str,
        default='O2',
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    args.n_gpu = n_gpu

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    set_seed(args)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer(args.vocab_file, max_len=args.max_seq_length)

    train_examples = processor.get_train_examples(args.data_dir)
    num_train_optimization_steps = int(
        len(train_examples) / args.train_batch_size /
        args.gradient_accumulation_steps) * args.num_train_epochs
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
        )

    # Prepare model
    config = Config(args.config_file)
    if output_mode == "classification":
        model = SequenceClassification(config, num_labels=num_labels)
    elif output_mode == "classification_multitask":
        model = SequenceClassificationMultitask(config)
    model.bert.load_state_dict(torch.load(args.checkpoint))

    # Do not fine-tune BERT parameters
    # for param in model.bert.parameters():
    #     param.requires_grad = False
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    model.to(device)

    global_step, tr_loss = 0, 0.
    train_features = convert_examples_to_features(train_examples, label_list,
                                                  args.max_seq_length,
                                                  tokenizer, output_mode)
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Batch size = %d", args.train_batch_size)
    logger.info("  Num steps = %d", num_train_optimization_steps)
    all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                 dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                  dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                   dtype=torch.long)

    if output_mode == "classification_multitask":
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        num_labels = [2, 3, 3]
    elif output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.float)

    train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                               all_label_ids)
    if args.local_rank == -1:
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = DistributedSampler(train_data)
    train_dataloader = DataLoader(train_data,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    t_total = len(train_dataloader
                  ) // args.gradient_accumulation_steps * args.num_train_epochs
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    scheduler = WarmupLinearSchedule(optimizer,
                                     warmup_steps=t_total *
                                     args.warmup_proportion,
                                     t_total=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        if args.fp16_opt_level == "O2":
            keep_batchnorm_fp32 = False
        else:
            keep_batchnorm_fp32 = True
        model, optimizer = amp.initialize(
            model,
            optimizer,
            opt_level=args.fp16_opt_level,
            keep_batchnorm_fp32=keep_batchnorm_fp32)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
    writer = open(output_eval_file, "w", encoding="utf-8")
    model.train()
    for _ in trange(int(args.num_train_epochs), desc="Epoch"):
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        epoch_iterator = tqdm(train_dataloader,
                              desc="Train(XX Epoch) Step(X/X) (loss=X.X)",
                              disable=args.local_rank not in [-1, 0])
        model.train()
        for step, batch in enumerate(epoch_iterator):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch

            # define a new function to compute loss values for both output_modes
            logits = model(input_ids, segment_ids, input_mask, labels=None)
            if output_mode == "classification_multitask":
                loss_fct0 = CrossEntropyLoss()
                loss_fct1 = CrossEntropyLoss()
                loss_fct2 = CrossEntropyLoss()

                loss0 = loss_fct0(logits[0].view(-1, num_labels[0]),
                                  label_ids[:, 0].view(-1))
                loss1 = loss_fct1(logits[1].view(-1, num_labels[1]),
                                  label_ids[:, 1].view(-1))
                loss2 = loss_fct2(logits[2].view(-1, num_labels[2]),
                                  label_ids[:, 2].view(-1))
                loss = loss0 + loss1 + loss2
            elif output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, num_labels),
                                label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), label_ids.view(-1))

            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
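                    # clip the FP32 master weights that apex AMP maintains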
                    torch.nn.utils.clip_grad_norm_(
                        amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.max_grad_norm)
                scheduler.step()  # Update learning rate schedule
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                epoch_iterator.set_description(
                    "Train(%d Epoch) Step(%d / %d) (loss=%5.5f)" %
                    (_, global_step, t_total, loss.item()))

        model_checkpoint = "%s_%d.bin" % (args.task_name, _)
        logger.info(model_checkpoint)
        output_model_file = os.path.join(args.output_dir, model_checkpoint)
        if n_gpu > 1 or args.local_rank != -1:
            logger.info("***** Saving file *****(module)")
            torch.save(model.module.state_dict(), output_model_file)
        else:
            logger.info("***** Saving file *****")
            torch.save(model.state_dict(), output_model_file)

        if args.do_eval and (args.local_rank == -1
                             or torch.distributed.get_rank() == 0):
            eval_examples = processor.get_dev_examples(args.data_dir)
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer,
                output_mode)
            logger.info("***** Running evaluation *****")
            logger.info("  Num examples = %d", len(eval_examples))
            logger.info("  Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                         dtype=torch.long)
            all_input_mask = torch.tensor(
                [f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor(
                [f.segment_ids for f in eval_features], dtype=torch.long)

            if output_mode == "classification_multitask":
                all_label_ids = torch.tensor(
                    [f.label_id for f in eval_features], dtype=torch.long)
            elif output_mode == "classification":
                all_label_ids = torch.tensor(
                    [f.label_id for f in eval_features], dtype=torch.long)
            elif output_mode == "regression":
                all_label_ids = torch.tensor(
                    [f.label_id for f in eval_features], dtype=torch.float)

            eval_data = TensorDataset(all_input_ids, all_input_mask,
                                      all_segment_ids, all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data,
                                         sampler=eval_sampler,
                                         batch_size=args.eval_batch_size)

            model.eval()
            eval_loss = 0
            nb_eval_steps = 0
            preds = []
            preds0 = []
            preds1 = []
            preds2 = []

            for input_ids, input_mask, segment_ids, label_ids in tqdm(
                    eval_dataloader, desc="Evaluating"):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                label_ids = label_ids.to(device)

                with torch.no_grad():
                    logits = model(input_ids,
                                   segment_ids,
                                   input_mask,
                                   labels=None)

                # create eval loss and other metric required by the task
                if output_mode == "classification_multitask":
                    loss_fct0 = CrossEntropyLoss()
                    loss_fct1 = CrossEntropyLoss()
                    loss_fct2 = CrossEntropyLoss()

                    loss0 = loss_fct0(logits[0].view(-1, num_labels[0]),
                                      label_ids[:, 0].view(-1))
                    loss1 = loss_fct1(logits[1].view(-1, num_labels[1]),
                                      label_ids[:, 1].view(-1))
                    loss2 = loss_fct2(logits[2].view(-1, num_labels[2]),
                                      label_ids[:, 2].view(-1))
                    tmp_eval_loss = loss0 + loss1 + loss2
                elif output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    tmp_eval_loss = loss_fct(logits.view(-1, num_labels),
                                             label_ids.view(-1))
                elif output_mode == "regression":
                    loss_fct = MSELoss()
                    tmp_eval_loss = loss_fct(logits.view(-1),
                                             label_ids.view(-1))

                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1

                if output_mode == "classification_multitask":
                    if len(preds0) == 0:
                        preds0.append(logits[0].detach().cpu().numpy())
                        preds1.append(logits[1].detach().cpu().numpy())
                        preds2.append(logits[2].detach().cpu().numpy())
                    else:
                        preds0[0] = np.append(preds0[0],
                                              logits[0].detach().cpu().numpy(),
                                              axis=0)
                        preds1[0] = np.append(preds1[0],
                                              logits[1].detach().cpu().numpy(),
                                              axis=0)
                        preds2[0] = np.append(preds2[0],
                                              logits[2].detach().cpu().numpy(),
                                              axis=0)
                else:
                    if len(preds) == 0:
                        preds.append(logits.detach().cpu().numpy())
                    else:
                        preds[0] = np.append(preds[0],
                                             logits.detach().cpu().numpy(),
                                             axis=0)

            eval_loss = eval_loss / nb_eval_steps

            if output_mode == "classification_multitask":
                preds0 = preds0[0]
                preds1 = preds1[0]
                preds2 = preds2[0]

                preds0 = np.argmax(preds0, axis=1)
                preds1 = np.argmax(preds1, axis=1)
                preds2 = np.argmax(preds2, axis=1)
                preds = [preds0, preds1, preds2]
            elif output_mode == "classification":
                preds = preds[0]
                preds = np.argmax(preds, axis=1)
            elif output_mode == "regression":
                preds = preds[0]
                preds = np.squeeze(preds)
            result = compute_metrics(task_name, preds, all_label_ids.numpy())
            loss = tr_loss / global_step

            result['eval_loss'] = eval_loss
            result['global_step'] = global_step
            result['loss'] = loss

            logger.info("***** Eval results *****")
            writer.write("%d_epoch\n" % (_ + 1))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
            writer.write("\n")
    writer.close()
Code example #20
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--corpus_path",
                        default=None,
                        type=str,
                        required=True,
                        help="The input training data file (a text file).")
    parser.add_argument("--model_name",
                        type=str,
                        required=True,
                        help="model name")
    parser.add_argument("--gen_config_file",
                        type=str,
                        required=True,
                        help="generator model configuration file")
    parser.add_argument("--disc_config_file",
                        type=str,
                        required=True,
                        help="discriminator model configuration file")

    ## Other parameters
    parser.add_argument(
        "--output_dir",
        default='checkpoint',
        type=str,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument("--learning_rate",
                        default=5e-4,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.01,
                        type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-6,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_steps",
                        default=1000000,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_steps",
                        default=10000,
                        type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument(
        "--mask_prob",
        default=0.15,
        type=float,
        help="Masking probability (Small, Base=0.15 Large=0.25)")

    parser.add_argument('--logging_steps',
                        type=int,
                        default=8,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps',
                        type=int,
                        default=50000,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")

    parser.add_argument(
        '--fp16',
        action='store_true',
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        '--fp16_opt_level',
        type=str,
        default='O2',
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0.0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="For distributed training: local_rank")
    args = parser.parse_args()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank, device, args.n_gpu, bool(args.local_rank != -1),
        args.fp16)

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    tokenizer = BertTokenizer(vocab_file='dataset/wiki_vocab_32k_0213.txt',
                              do_lower_case=args.do_lower_case,
                              max_len=args.max_seq_length,
                              do_basic_tokenize=True)
    generator_config = Config(args.gen_config_file)
    discriminator_config = Config(args.disc_config_file)
    model = Electra(generator_config, discriminator_config)

    gen_params = count_parameters(model.generator)
    disc_params = count_parameters(model.discriminator)

    logger.info("Generator Model Parameter: %d" % gen_params)
    logger.info("Discriminator Model Parameter: %d" % disc_params)

    logger.info("Generator Configuration: %s" % generator_config)
    logger.info("Discriminator Configuration: %s" % discriminator_config)
    model.to(args.device)

    args.vocab_size = generator_config.vocab_size
    logger.info("Training parameters %s", args)

    # Training
    train(args, model, tokenizer)
Code example #21
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--config_file",
                        type=str,
                        required=True,
                        help="model configuration file")
    parser.add_argument("--vocab_file",
                        type=str,
                        required=True,
                        help="tokenizer vocab file")
    parser.add_argument("--checkpoint",
                        default=None,
                        type=str,
                        required=True,
                        help="fine-tuned model checkpoint")
    # Other parameters
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    args = parser.parse_args()

    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()
    args.n_gpu = n_gpu

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    logger.info("device: {} n_gpu: {}, 16-bits training: {}".format(
        device, n_gpu, args.fp16))

    set_seed(args)

    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer(args.vocab_file, max_len=args.max_seq_length)

    test_examples = processor.get_test_examples(args.data_dir)

    # Prepare model
    config = Config(args.config_file)
    model = SequenceClassification(config, num_labels=num_labels)
    model.load_state_dict(torch.load(args.checkpoint))
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    model.to(device)
    if args.fp16:
        model = model.half()

    test_features = convert_examples_to_features(test_examples, label_list,
                                                 args.max_seq_length,
                                                 tokenizer, output_mode)
    logger.info("***** Running Evaluation *****")
    logger.info("  Num examples = %d", len(test_examples))
    logger.info("  Batch size = %d", args.eval_batch_size)
    all_input_ids = torch.tensor([f.input_ids for f in test_features],
                                 dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in test_features],
                                  dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in test_features],
                                   dtype=torch.long)

    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in test_features],
                                     dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in test_features],
                                     dtype=torch.float)

    test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                              all_label_ids)
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data,
                                 sampler=test_sampler,
                                 batch_size=args.eval_batch_size)

    model.eval()
    preds = []
    for input_ids, input_mask, segment_ids, label_ids in tqdm(
            test_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)

        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask, labels=None)
        if len(preds) == 0:
            preds.append(logits.detach().cpu().numpy())
        else:
            preds[0] = np.append(preds[0],
                                 logits.detach().cpu().numpy(),
                                 axis=0)
    preds = preds[0]
    if output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif output_mode == "regression":
        preds = np.squeeze(preds)
    result = compute_metrics(task_name, preds, all_label_ids.numpy())

    logger.info("***** Eval results *****")
    for key in sorted(result.keys()):
        logger.info("  %s = %s", key, str(result[key]))
Code example #22
File: main.py  Project: jiachenwestlake/Entity_BERT
def train(hp):
    tokenizer = BertTokenizer.from_pretrained(hp.bert_model_dir, do_lower_case=False)
    if hp.dataset == "lm_raw_data_finance":
        dict_file = "../data/dataset_finance/raw_data/annual_report_entity_list"
    elif hp.dataset == "lm_raw_data_novel":
        dict_file = "../data/dataset_book9/raw_data/entity_book9"
    elif hp.dataset == "lm_raw_data_thuner":
        dict_file = "../data/dataset_thuner/raw_data/thu_entity.txt"
    entity_dict = EntityDict(hp.dataset, dict_file)
    entity_dict.load(os.path.join(hp.bert_model_dir, 'entity.dict'))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    ner_label = NerLabel([hp.trainset, hp.validset])
    fname = os.path.join(hp.logdir, 'dict.pt')
    ner_label.save(fname)

    train_dataset = NerDataset(hp.trainset, ner_label, tokenizer, entity_dict)
    eval_dataset = NerDataset(hp.validset, ner_label, tokenizer, entity_dict)
    test_dataset = NerDataset(hp.testset, ner_label, tokenizer, entity_dict)

    model = Net(hp.bert_model_dir, hp.top_rnns, len(ner_label.VOCAB), entity_dict.entity_num, device, hp.finetuning).to(device)
    device_ids = [0, 1]
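    # NOTE: hard-coded to two GPUs; adjust device_ids to the visible devices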
    model = nn.DataParallel(model, device_ids=device_ids)

    train_iter = data.DataLoader(dataset=train_dataset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=4,
                                 collate_fn=pad)
    eval_iter = data.DataLoader(dataset=eval_dataset,
                                batch_size=hp.batch_size,
                                shuffle=False,
                                num_workers=4,
                                collate_fn=pad)
    test_iter = data.DataLoader(dataset=test_dataset,
                                batch_size=hp.batch_size,
                                shuffle=False,
                                num_workers=4,
                                collate_fn=pad)

    optimizer = optim.Adam(model.parameters(), lr=hp.lr)
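    # ignore_index=0 keeps padded positions out of the tagging loss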
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    ## train the model
    best_eval = -10
    for epoch in range(1, hp.n_epochs + 1):
        train_epoch(model, train_iter, optimizer, criterion, tokenizer)

        print(f"=========eval at epoch={epoch}=========")
        if not os.path.exists(hp.logdir): os.makedirs(hp.logdir)
        fname = os.path.join(hp.logdir, 'model')
        precision, recall, f1 = evaluate(model, eval_iter, fname, ner_label, verbose=False)

        if f1 > best_eval:
            best_eval = f1
            print("epoch{} get the best eval f-score:{}".format(epoch, best_eval))
            torch.save(model.state_dict(), f"{fname}.pt")
            print(f"weights were saved to {fname}.pt")

        print(f"=========test at epoch={epoch}=========")
        if not os.path.exists(hp.logdir): os.makedirs(hp.logdir)
        fname = os.path.join(hp.logdir, str(epoch))
        precision, recall, f1 = evaluate(model, test_iter, fname, ner_label, verbose=False)
Code example #23
File: run_qa.py  Project: onacloud/kor_pretrain_LM
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    tokenizer = BertTokenizer(args.vocab_file,
                              max_len=args.max_seq_length,
                              do_basic_tokenize=True)
    # Prepare model
    config = Config.from_json_file(args.config_file)
    model = QuestionAnswering(config)
    model.bert.load_state_dict(torch.load(args.checkpoint))
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    model.to(device)

    logger.info("Training hyper-parameters %s", args)

    # Training
    train_dataset = load_and_cache_examples(args, tokenizer)
    train(args, train_dataset, model)
Code example #24
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--checkpoint",
                        default='output/korquad_3.bin',
                        type=str,
                        help="checkpoint")
    parser.add_argument(
        "--output_dir",
        default='debug',
        type=str,
        help=
        "The output directory where the model checkpoints and predictions will be written."
    )
    parser.add_argument("--model_config", type=str)
    parser.add_argument("--vocab", type=str)

    ## Other parameters
    parser.add_argument(
        "--predict_file",
        default='data/KorQuAD_v1.0_dev.json',
        type=str,
        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json"
    )

    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    )
    parser.add_argument(
        "--doc_stride",
        default=64,
        type=int,
        help=
        "When splitting up a long document into chunks, how much stride to take between chunks."
    )
    parser.add_argument(
        "--max_query_length",
        default=96,
        type=int,
        help=
        "The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--batch_size",
                        default=16,
                        type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help=
        "The total number of n-best predictions to generate in the nbest_predictions.json output file."
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help=
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.")
    parser.add_argument(
        "--verbose_logging",
        action='store_true',
        help=
        "If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.")

    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument('--ensemble', default='false', type=str)
    args = parser.parse_args()
    args.ensemble = args.ensemble.lower() == 'true'

    # Setup CUDA, GPU & distributed training
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)
    logger.info("device: %s, n_gpu: %s, 16-bits training: %s", device,
                args.n_gpu, args.fp16)

    # Set seed
    set_seed(args)

    tokenizer = BertTokenizer(vocab_file=args.vocab,
                              do_basic_tokenize=True,
                              max_len=args.max_seq_length)
    config = Config.from_json_file(args.model_config)
    model = QuestionAnswering(config)

    # Evaluate
    examples, features = load_and_cache_examples(args, tokenizer)

    if not args.ensemble:
        logger.info(" Load Model: %s ", args.checkpoint)
        model.load_state_dict(torch.load(args.checkpoint))
        num_params = count_parameters(model)
        logger.info("Total Parameter: %d" % num_params)
        if args.fp16:
            model.half()
        model.to(args.device)

        logger.info("Evaluation parameters %s", args)
        results = evaluate(args, model, examples, features)
    else:
        list_ckpts = []
        with open(os.path.join(args.output_dir, "ckpt_list.txt"), 'r') as f:
            for line in f:
                list_ckpts.append(line.strip())

        list_results = []
        for i, ckpt in enumerate(list_ckpts):
            fn = os.path.join(args.output_dir, ckpt)
            logger.info(" Load Model: %s ", fn)
            model.load_state_dict(torch.load(fn))
            num_params = count_parameters(model)
            logger.info("Total Parameter: %d" % num_params)
            if args.fp16:
                model.half()
            model.to(args.device)

            logger.info("Evaluation parameters %s", args)
            results = evaluate(args, model, examples, features)
            list_results.append(results)

        list_qid = []
        for example in examples:
            list_qid.append(example.qas_id)

        all_predictions = collections.OrderedDict()
        for qid in list_qid:
            max_prob, answer = 0.0, ""
            for results in list_results:
                prob, text = 0.0, None
                for output in results[qid]:
                    if output["text"]:
                        prob = output["probability"]
                        text = output["text"]
                        break
                if prob > max_prob:
                    max_prob = prob
                    answer = text

            all_predictions[qid] = answer

        output_prediction_file = os.path.join(args.output_dir,
                                              "predictions.json")
        with open(output_prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")

        expected_version = 'KorQuAD_v1.0'
        with open(args.predict_file) as dataset_file:
            dataset_json = json.load(dataset_file)
            read_version = "_".join(dataset_json['version'].split("_")[:-1])
            if read_version != expected_version:
                logger.warning('Evaluation expects ' + expected_version +
                               ', but got dataset with ' + read_version)
            dataset = dataset_json['data']
        with open(os.path.join(args.output_dir,
                               "predictions.json")) as prediction_file:
            predictions = json.load(prediction_file)
        logger.info(json.dumps(korquad_eval(dataset, predictions)))
Code example #25
        try:
            from apex.fp16_utils import FP16_Optimizer
        except ImportError:
            logger.warning('apex not installed, ignoring --fp16 option')
            args.fp16 = False

# object of <class 'torch.device'>
device = torch.device(
    'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
n_gpu = torch.cuda.device_count()

########################################################################################################################
# Load Data
########################################################################################################################
## Use the BERT WordPiece vocabulary
tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                          do_lower_case=args.do_lower_case)
vocab_size = len(tokenizer.vocab)
corpus = load_lm_data(args.entity_dict, args.data, args.output_dir,
                      args.dataset, tokenizer)
## Training Dataset
train_iter = corpus.get_iterator('train',
                                 args.batch_size,
                                 args.max_seq_length,
                                 args.max_doc_length,
                                 device=device)

## Total number of batches and optimizer update steps
total_train_steps = int(train_iter.batch_steps * args.num_train_epochs)

########################################################################################################################
# Building the model
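
Example #25 counts raw batches: total_train_steps = batch_steps * epochs. If gradient accumulation were used (hypothetical here; this snippet does not pass an accumulation factor), each optimizer update would consume several batches, so the update count divides accordingly. A minimal sketch of that arithmetic:

def optimization_steps(batch_steps: int, epochs: int,
                       accumulation_steps: int = 1) -> int:
    # Batches per epoch, divided by the accumulation factor, times epochs.
    return (batch_steps // accumulation_steps) * epochs


# 10_000 batches per epoch, 3 epochs, accumulating gradients over 4 batches:
assert optimization_steps(10_000, 3, 4) == 7_500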
Code example #26
File: eval.py  Project: taeminlee/korquad-challenge
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--checkpoint",
                        default='output/korquad_3.bin',
                        type=str,
                        help="checkpoint")
    parser.add_argument(
        "--output_dir",
        default='debug',
        type=str,
        help=
        "The output directory where the model checkpoints and predictions will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--predict_file",
        default='data/KorQuAD_v1.0_dev.json',
        type=str,
        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json"
    )
    parser.add_argument(
        "--config_name",
        default="data/bert_small.json",
        type=str,
        help="Pretrained config name or path if not the same as model_name")

    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    )
    parser.add_argument(
        "--doc_stride",
        default=64,
        type=int,
        help=
        "When splitting up a long document into chunks, how much stride to take between chunks."
    )
    parser.add_argument(
        "--max_query_length",
        default=96,
        type=int,
        help=
        "The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--batch_size",
                        default=16,
                        type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help=
        "The total number of n-best predictions to generate in the nbest_predictions.json output file."
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help=
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.")
    parser.add_argument(
        "--verbose_logging",
        action='store_true',
        help=
        "If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.")

    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    args = parser.parse_args()

    # Setup CUDA, GPU & distributed training
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)
    logger.info("device: %s, n_gpu: %s, 16-bits training: %s", device,
                args.n_gpu, args.fp16)

    # Set seed
    set_seed(args)

    tokenizer = BertTokenizer(vocab_file='data/wiki_vocab_32k.txt',
                              do_basic_tokenize=True,
                              max_len=args.max_seq_length)
    config = Config.from_json_file(args.config_name)
    model = QuestionAnswering(config)
    model.load_state_dict(torch.load(args.checkpoint))
    num_params = count_parameters(model)
    logger.info("Total Parameter: %d" % num_params)
    if args.fp16:
        model.half()
    model.to(args.device)

    logger.info("Evaluation parameters %s", args)

    # Evaluate
    examples, features = load_and_cache_examples(args, tokenizer)
    evaluate(args, model, examples, features)
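
set_seed(args) is called above but not defined in these eval scripts. A typical implementation, sketched under the assumption that args carries the seed and n_gpu values set earlier and that every RNG the run touches should be seeded:

import random

import numpy as np
import torch


def set_seed(args) -> None:
    # Seed Python, NumPy, and PyTorch (all GPUs) for reproducible runs.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)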
Code example #27
def main():
    parser = argparse.ArgumentParser()
    # Parameters
    parser.add_argument(
        "--output_dir",
        default='debug',
        type=str,
        help=
        "The output directory where the model checkpoints and predictions will be written."
    )
    parser.add_argument("--checkpoint",
                        default='output/korquad_3.bin',
                        type=str,
                        help="fine-tuned model checkpoint")
    parser.add_argument("--config_file",
                        default='data/large_config.json',
                        type=str,
                        help="model configuration file")
    parser.add_argument("--vocab_file",
                        default='data/large_v1_32k_vocab.txt',
                        type=str,
                        help="tokenizer vocab file")
    parser.add_argument("--check_list", default=False, type=bool)
    parser.add_argument("--eda_type",
                        default="no_eda",
                        type=str,
                        help="sr, ri , rd, rs")

    parser.add_argument(
        "--predict_file",
        default='data/korquad/KorQuAD_v1.0_dev.json',
        type=str,
        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json"
    )

    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    )
    parser.add_argument(
        "--doc_stride",
        default=64,
        type=int,
        help=
        "When splitting up a long document into chunks, how much stride to take between chunks."
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help=
        "The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.")
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help=
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.")

    parser.add_argument("--batch_size",
                        default=16,
                        type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help=
        "The total number of n-best predictions to generate in the nbest_predictions."
        "json output file.")
    parser.add_argument(
        "--verbose_logging",
        action='store_true',
        help=
        "If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument("--just_eval", default=False, type=bool)
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")

    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    args = parser.parse_args()

    # Setup CUDA, GPU & distributed training
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)
    logger.info("device: %s, n_gpu: %s, 16-bits training: %s", device,
                args.n_gpu, args.fp16)

    # Set seed
    set_seed(args)

    tokenizer = BertTokenizer(vocab_file=args.vocab_file,
                              do_basic_tokenize=True,
                              max_len=args.max_seq_length)
    config = Config.from_json_file(args.config_file)
    model = QuestionAnswering(config)
    if args.check_list:
        model_list = os.listdir(args.checkpoint)
        model_list.sort()
        result_dict = defaultdict(list)
        eda_list = ["st_rs", "st_rd", "rs", "rd", "sr", "ri", "no_eda"]
        for m in model_list:
            model.load_state_dict(torch.load(os.path.join(args.checkpoint, m)))
            num_params = count_parameters(model)
            logger.info("Total Parameter: %d" % num_params)
            if args.fp16:
                model.half()
            model.to(args.device)
            logger.info("Evaluation parameters %s", args)
            for eda in eda_list:
                if eda in m:
                    args.eda_type = eda
                    break
            # Evaluate
            examples, features = load_and_cache_examples(args, tokenizer)
            r = evaluate(args, model, examples, features)
            em = r["exact_match"]
            f1 = r["f1"]
            result_dict[args.eda_type].append([em, f1, m])
        print(result_dict)

    else:
        model.load_state_dict(torch.load(args.checkpoint))
        num_params = count_parameters(model)
        logger.info("Total Parameter: %d" % num_params)
        if args.fp16:
            model.half()
        model.to(args.device)
        logger.info("Evaluation parameters %s", args)
        # Evaluate
        examples, features = load_and_cache_examples(args, tokenizer)
        evaluate(args, model, examples, features)
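
The --check_list branch above prints result_dict as raw [EM, F1, checkpoint] triples per EDA type. A small hypothetical follow-up (not part of the original script) that averages them per type, which is usually what a sweep like this is after:

def summarize(result_dict):
    # Mean exact-match and F1 over the checkpoints of each EDA type.
    summary = {}
    for eda_type, runs in result_dict.items():
        ems = [em for em, _f1, _name in runs]
        f1s = [f1 for _em, f1, _name in runs]
        summary[eda_type] = (sum(ems) / len(ems), sum(f1s) / len(f1s))
    return summary


# Example: two 'rd' checkpoints with EM/F1 of 79/89 and 81/91.
demo = {'rd': [[79.0, 89.0, 'rd_1.bin'], [81.0, 91.0, 'rd_2.bin']]}
assert summarize(demo) == {'rd': (80.0, 90.0)}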