Example No. 1
    def __init__(self, model, config, **kwargs):
        super().__init__(model, kwargs['embedding'], kwargs['train_loader'],
                         config, None, kwargs['test_evaluator'],
                         kwargs['dev_evaluator'])

        if config['model'] in {
                'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'
        }:
            variant = ('bert-large-uncased' if config['model'] == 'BERT-Large'
                       else 'bert-base-uncased')
            self.tokenizer = BertTokenizer.from_pretrained(
                variant, is_lowercase=config['is_lowercase'])
            self.processor = kwargs['processor']
            self.optimizer = config['optimizer']
            self.train_examples = self.processor.get_train_examples(
                config['data_dir'], topic=config['topic'])
            self.num_train_optimization_steps = int(
                len(self.train_examples) / config['batch_size'] /
                config['gradient_accumulation_steps']) * config['epochs']
        self.config = config
        self.early_stop = False
        self.best_dev_ap = 0
        self.iterations = 0
        self.unimproved_iters = 0

        self.log_header = 'Epoch Iteration Progress   Dev/Acc.  Dev/Pr.  Dev/AP.   Dev/F1   Dev/Loss'
        self.log_template = ' '.join(
            '{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'
            .split(','))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        self.snapshot_path = os.path.join(self.model_outfile,
                                          config['dataset'].NAME,
                                          '%s.pt' % timestamp)
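The `num_train_optimization_steps` value above is just batches per epoch, divided by the gradient-accumulation factor, times the number of epochs. A minimal standalone sketch of that arithmetic, with hypothetical numbers standing in for the `config` values:

import math

# Hypothetical stand-ins for config['batch_size'], config['epochs'], etc.
num_examples = 10_000
batch_size = 32
gradient_accumulation_steps = 2
epochs = 3

# Same formula as above; math.ceil keeps the partial final batch instead of
# truncating it the way int(...) does.
steps_per_epoch = math.ceil(num_examples / batch_size / gradient_accumulation_steps)
num_train_optimization_steps = steps_per_epoch * epochs
print(num_train_optimization_steps)  # 471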
Example No. 2
    def __init__(self, model, optimizer, processor, args):
        self.args = args
        self.model = model
        self.optimizer = optimizer
        self.processor = processor
        self.train_examples = self.processor.get_train_examples(args.data_dir)
        self.tokenizer = BertTokenizer.from_pretrained(
            args.model, is_lowercase=args.is_lowercase)

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        self.snapshot_path = os.path.join(self.args.save_path,
                                          self.processor.NAME,
                                          '%s.pt' % timestamp)

        self.num_train_optimization_steps = int(
            len(self.train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            self.num_train_optimization_steps = (self.num_train_optimization_steps //
                                                 torch.distributed.get_world_size())
        self.log_header = 'Epoch Iteration Progress   Dev/Acc.  Dev/Hamm.  Dev/Jacc.   Dev/Prec Dev/Rec Dev/micro-F1 Dev/F1  Dev/Loss'
        self.log_template = ' '.join(
            '{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'
            .split(','))

        self.iterations, self.nb_tr_steps, self.tr_loss = 0, 0, 0
        self.best_dev_f1, self.unimproved_iters = 0, 0
        self.early_stop = False
Example No. 3
    def __init__(self, model, config, **kwargs):
        super().__init__(kwargs['dataset'],
                         model,
                         kwargs['embedding'],
                         kwargs['data_loader'],
                         batch_size=config['batch_size'],
                         device=config['device'])

        if config['model'] in {
                'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'
        }:
            variant = ('bert-large-uncased' if config['model'] == 'BERT-Large'
                       else 'bert-base-uncased')
            self.tokenizer = BertTokenizer.from_pretrained(
                variant, is_lowercase=config['is_lowercase'])
            self.processor = kwargs['processor']
            if config['split'] == 'test':
                self.eval_examples = self.processor.get_test_examples(
                    config['data_dir'], topic=config['topic'])
            else:
                self.eval_examples = self.processor.get_dev_examples(
                    config['data_dir'], topic=config['topic'])

        self.config = config
        self.ignore_lengths = config['ignore_lengths']
        self.y_target = None
        self.y_pred = None
        self.docid = None
Example No. 4
def load_text(texts: List[str]):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    max_pos = 512
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                              do_lower_case=True)
    sep_vid = tokenizer.vocab['[SEP]']
    cls_vid = tokenizer.vocab['[CLS]']

    n_lines = len(texts)

    def _process_src(raw):
        raw = raw.strip().lower()
        src_subtokens = tokenizer.tokenize(raw)
        src_subtokens = ['[CLS]'] + src_subtokens + ['[SEP]']
        src_subtoken_idxs = tokenizer.convert_tokens_to_ids(src_subtokens)
        src_subtoken_idxs = src_subtoken_idxs[:-1][:max_pos]
        src_subtoken_idxs[-1] = sep_vid
        _segs = [-1] + \
            [i for i, t in enumerate(src_subtoken_idxs) if t == sep_vid]
        segs = [_segs[i] - _segs[i - 1] for i in range(1, len(_segs))]
        segments_ids = []
        segs = segs[:max_pos]
        for i, s in enumerate(segs):
            if (i % 2 == 0):
                segments_ids += s * [0]
            else:
                segments_ids += s * [1]

        src = torch.tensor(src_subtoken_idxs)[None, :].to(device)
        mask_src = (1 - (src == 0).float()).to(device)
        cls_ids = [[
            i for i, t in enumerate(src_subtoken_idxs) if t == cls_vid
        ]]
        clss = torch.tensor(cls_ids).to(device)
        mask_cls = 1 - (clss == -1).float()
        clss[clss == -1] = 0

        return src, mask_src, segments_ids, clss, mask_cls

    for x in tqdm(texts, total=n_lines):
        src, mask_src, segments_ids, clss, mask_cls = _process_src(x)
        segs = torch.tensor(segments_ids)[None, :].to(device)
        batch = Batch()
        batch.src = src
        batch.tgt = None
        batch.mask_src = mask_src
        batch.mask_tgt = None
        batch.segs = segs
        batch.src_str = [[
            sent.replace('[SEP]', '').strip() for sent in x.split('[CLS]')
        ]]
        batch.tgt_str = ['']
        batch.clss = clss
        batch.mask_cls = mask_cls

        batch.batch_size = 1
        yield batch
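A compact sketch of the segment-id logic inside `_process_src`, written against the Hugging Face `transformers` package (an assumption; the snippet above may use a different BertTokenizer variant). Sentences are delimited by `[SEP]`, and the segment id alternates 0/1 from one sentence to the next:

from transformers import BertTokenizer  # assumed package; the original may differ

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
sep_vid = tokenizer.vocab['[SEP]']

raw = "the first sentence . [SEP] [CLS] the second sentence ."
src_subtokens = ['[CLS]'] + tokenizer.tokenize(raw) + ['[SEP]']
ids = tokenizer.convert_tokens_to_ids(src_subtokens)

# Gaps between consecutive [SEP] positions give per-sentence token counts.
sep_positions = [-1] + [i for i, t in enumerate(ids) if t == sep_vid]
lengths = [sep_positions[i] - sep_positions[i - 1] for i in range(1, len(sep_positions))]

# Alternate segment id 0 / 1 sentence by sentence, as _process_src does.
segments_ids = []
for i, n in enumerate(lengths):
    segments_ids += n * [0 if i % 2 == 0 else 1]
print(segments_ids)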
Example No. 5
    def __init__(self, model, processor, args, split='dev'):
        self.args = args
        self.model = model
        self.processor = processor
        self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
        if split == 'test':
            self.eval_examples = self.processor.get_test_examples(args.data_dir)
        else:
            self.eval_examples = self.processor.get_dev_examples(args.data_dir)
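For reference, the same tokenizer load with the standard Hugging Face API, which uses `do_lower_case` rather than the `is_lowercase` keyword seen above (a sketch, assuming the `transformers` package):

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
tokens = tokenizer.tokenize("Evaluating on the dev split.")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens, ids)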
Example No. 6
    def __init__(self, model, processor, args, split='dev'):
        self.args = args
        self.model = model
        self.processor = processor
        self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
        self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
        if split == 'test':
            self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
        elif split == 'dev':
            self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
        else:
            self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
Example No. 7
def decode(hp):
    tokenizer = BertTokenizer.from_pretrained(hp.bert_model_dir, do_lower_case=False)
    if hp.dataset == "lm_raw_data_finance":
        dict_file = "../data/dataset_finance/annual_report_entity_list"
    elif hp.dataset == "lm_raw_data_novel":
        dict_file = "../data/dataset_book9/entity_book9"
    entity_dict = EntityDict(hp.dataset, dict_file)
    entity_dict.load(os.path.join(hp.bert_model_dir, 'entity.dict'))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    ner_label = NerLabel([hp.decodeset])
    if os.path.exists(os.path.join(hp.logdir, 'dict.pt')):
        ner_label.load(os.path.join(hp.logdir, 'dict.pt'))
    else:
        print('dict.pt does not exist')
        exit()

    decode_dataset = NerDataset(hp.decodeset, ner_label, tokenizer, entity_dict)

    model = Net(hp.bert_model_dir, hp.top_rnns, len(ner_label.VOCAB), entity_dict.entity_num, device, hp.finetuning).to(device)
    model = nn.DataParallel(model)
    ## Load the model parameters
    if os.path.exists(os.path.join(hp.logdir, 'model.pt')):
        model.load_state_dict(torch.load(os.path.join(hp.logdir, 'model.pt')))
    else:
        print("the pretrianed model path does not exist! ")
        exit()

    decode_iter = data.DataLoader(dataset=decode_dataset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=4,
                                 collate_fn=pad)

    fname = os.path.join(hp.logdir, '_')

    precision, recall, f1 = evaluate(model, decode_iter, fname, ner_label, verbose=False)
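The `collate_fn=pad` argument above delegates batch padding to a project-defined function. A self-contained sketch of that pattern with a toy `pad` (hypothetical; the real `pad` also handles labels and masks):

import torch
from torch.utils.data import DataLoader

def pad(batch):
    # Pad every sequence in the batch to the longest one with zeros.
    max_len = max(len(seq) for seq in batch)
    return torch.tensor([seq + [0] * (max_len - len(seq)) for seq in batch])

sequences = [[5, 6, 7], [8, 9], [10]]
loader = DataLoader(sequences, batch_size=2, shuffle=False, collate_fn=pad)
for batch in loader:
    print(batch.shape)  # torch.Size([2, 3]) then torch.Size([1, 1])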
Example No. 8
def train(hp):
    tokenizer = BertTokenizer.from_pretrained(hp.bert_model_dir, do_lower_case=False)
    if hp.dataset == "lm_raw_data_finance":
        dict_file = "../data/dataset_finance/raw_data/annual_report_entity_list"
    elif hp.dataset == "lm_raw_data_novel":
        dict_file = "../data/dataset_book9/raw_data/entity_book9"
    elif hp.dataset == "lm_raw_data_thuner":
        dict_file = "../data/dataset_thuner/raw_data/thu_entity.txt"
    entity_dict = EntityDict(hp.dataset, dict_file)
    entity_dict.load(os.path.join(hp.bert_model_dir, 'entity.dict'))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    ner_label = NerLabel([hp.trainset, hp.validset])
    fname = os.path.join(hp.logdir, 'dict.pt')
    ner_label.save(fname)

    train_dataset = NerDataset(hp.trainset, ner_label, tokenizer, entity_dict)
    eval_dataset = NerDataset(hp.validset, ner_label, tokenizer, entity_dict)
    test_dataset = NerDataset(hp.testset, ner_label, tokenizer, entity_dict)

    model = Net(hp.bert_model_dir, hp.top_rnns, len(ner_label.VOCAB), entity_dict.entity_num, device, hp.finetuning).to(device)
    device_ids = [0, 1]
    model = nn.DataParallel(model, device_ids=device_ids)

    train_iter = data.DataLoader(dataset=train_dataset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=4,
                                 collate_fn=pad)
    eval_iter = data.DataLoader(dataset=eval_dataset,
                                batch_size=hp.batch_size,
                                shuffle=False,
                                num_workers=4,
                                collate_fn=pad)
    test_iter = data.DataLoader(dataset=test_dataset,
                                batch_size=hp.batch_size,
                                shuffle=False,
                                num_workers=4,
                                collate_fn=pad)

    optimizer = optim.Adam(model.parameters(), lr=hp.lr)
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    ## train the model
    best_eval = -10
    for epoch in range(1, hp.n_epochs + 1):
        train_epoch(model, train_iter, optimizer, criterion, tokenizer)

        print(f"=========eval at epoch={epoch}=========")
        if not os.path.exists(hp.logdir): os.makedirs(hp.logdir)
        fname = os.path.join(hp.logdir, 'model')
        precision, recall, f1 = evaluate(model, eval_iter, fname, ner_label, verbose=False)

        if f1 > best_eval:
            best_eval = f1
            print("epoch{} get the best eval f-score:{}".format(epoch, best_eval))
            torch.save(model.state_dict(), f"{fname}.pt")
            print(f"weights were saved to {fname}.pt")

        print(f"=========test at epoch={epoch}=========")
        if not os.path.exists(hp.logdir): os.makedirs(hp.logdir)
        fname = os.path.join(hp.logdir, str(epoch))
        precision, recall, f1 = evaluate(model, test_iter, fname, ner_label, verbose=False)
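The training loop above keeps only the checkpoint with the best validation F-score. A minimal sketch of that bookkeeping with a toy model and a placeholder score (names are illustrative):

import torch
import torch.nn as nn

model = nn.Linear(8, 2)        # stand-in for Net(...)
best_eval = float('-inf')

for epoch in range(1, 4):
    f1 = 0.1 * epoch           # placeholder for evaluate(...)
    if f1 > best_eval:
        best_eval = f1
        # Saving the state_dict keeps the checkpoint loadable even if the
        # model is later wrapped in nn.DataParallel or refactored.
        torch.save(model.state_dict(), 'model.pt')
        print(f"epoch {epoch}: new best F-score {best_eval:.2f}, weights saved")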
Example No. 9
    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = True
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = (num_train_optimization_steps //
                                            torch.distributed.get_world_size())

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
Example No. 10
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_corpus",
                        default=None,
                        type=str,
                        required=True,
                        help="The input train corpus.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--on_memory",
                        action='store_true',
                        help="Whether to load train samples into memory or use disk")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumualte before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type = float, default = 0,
                        help = "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                        "0 (default value): dynamic loss scaling.\n"
                        "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    #train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        print("Loading Train Dataset", args.train_corpus)
        train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length,
                                    corpus_lines=None, on_memory=args.on_memory)
        num_train_optimization_steps = int(
            len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                                 t_total=num_train_optimization_steps)

        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=num_train_optimization_steps)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            #TODO: check if this works with current data generator from disk that relies on next(file)
            # (it doesn't return item back by index)
            train_sampler = DistributedSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

        # Save a trained model
        logger.info("** ** * Saving fine - tuned model ** ** * ")
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        if args.do_train:
            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(args.output_dir)
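The inner loop above scales the loss by `gradient_accumulation_steps` and only steps the optimizer every N mini-batches. A stripped-down sketch of that pattern with a toy model and random data (all names here are placeholders):

import torch
import torch.nn as nn

model = nn.Linear(16, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = nn.MSELoss()
gradient_accumulation_steps = 4

for step in range(16):
    x, y = torch.randn(8, 16), torch.randn(8, 1)
    loss = loss_fn(model(x), y) / gradient_accumulation_steps  # scale as above
    loss.backward()                       # gradients accumulate across calls
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()                  # one update per accumulated batch
        optimizer.zero_grad()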
Example No. 11
        try:
            from apex.fp16_utils import FP16_Optimizer
        except ImportError:
            logger.info('WARNING: apex not installed, ignoring --fp16 option')
            args.fp16 = False

# object of <class 'torch.device'>
device = torch.device(
    'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
n_gpu = torch.cuda.device_count()

########################################################################################################################
# Load Data
########################################################################################################################
## Use the BERT WordPiece vocabulary
tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                          do_lower_case=args.do_lower_case)
vocab_size = len(tokenizer.vocab)
corpus = load_lm_data(args.entity_dict, args.data, args.output_dir,
                      args.dataset, tokenizer)
## Training Dataset
train_iter = corpus.get_iterator('train',
                                 args.batch_size,
                                 args.max_seq_length,
                                 args.max_doc_length,
                                 device=device)

## total batch numbers and optim updating steps
total_train_steps = int(train_iter.batch_steps * args.num_train_epochs)

########################################################################################################################
# Building the model
Example No. 12
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = (num_train_optimization_steps //
                                            torch.distributed.get_world_size())

    model_bert = BertForSequenceClassification.from_pretrained(args.model,
                                                               num_labels=4)

    model_bert.to(device)

    if args.trained_model:
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage
                            )  # load personality model
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]

        del state['classifier.weight']  # remove the personality classifier head
        del state['classifier.bias']
        model_bert.load_state_dict(state, strict=False)
        model_bert = model_bert.to(device)
    args.freez_bert = False
    evaluate(model_bert,
             processor,
             args,
             last_bert_layers=-1,
             ngram_range=(1, 1))
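The checkpoint handling above strips the `module.` prefix that `nn.DataParallel` adds and drops the old classifier head before loading. A self-contained sketch of that surgery with a tiny stand-in model (the class and layer names are hypothetical):

import torch
import torch.nn as nn

class TinyClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(4, 4, bias=False)
        self.classifier = nn.Linear(4, 2)

# Pretend this came from torch.load(...): a DataParallel checkpoint whose keys
# carry a "module." prefix and include a task head we want to discard.
checkpoint = {'module.' + k: v for k, v in TinyClassifier().state_dict().items()}

state = {}
for key, value in checkpoint.items():
    state[key.replace('module.', '')] = value

del state['classifier.weight']   # drop the old head so the new model keeps its own
del state['classifier.bias']

model = TinyClassifier()
model.load_state_dict(state, strict=False)  # strict=False tolerates the missing head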
Example No. 13
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
import math
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# String templates for logging results
LOG_HEADER = 'Split  Dev/Acc.  Dev/Pr.  Dev/Re.   Dev/F1'
LOG_TEMPLATE = ' '.join('{:>5s},{:>9.4f},{:>8.4f},{:8.4f},{:8.4f}'.split(','))

bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                               is_lowercase=True)


def get_feature_vector(evaluators,
                       use_idf=False,
                       ngram_range=(1, 1),
                       max_seq_len=256):

    train_ev, dev_ev, test_ev = evaluators
    train_text, test_text, dev_text = [], [], []
    for i, x in enumerate(train_ev.eval_examples):
        #tokens_a = x.text_a.strip().split()
        tokens_a = [t for t in x.text_a.strip().split() if t not in ['', ' ']]
        #tokens_a = bert_tokenizer.tokenize(x.text_a)
        tokens_a = tokens_a[:max_seq_len]
        train_text.append(' '.join(tokens_a))
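Given the imports at the top of this example, the extracted text presumably feeds a TF-IDF + SVM baseline. A minimal sketch of that route on toy data (the real text and labels come from the evaluators above):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

train_text = ["good movie", "bad movie", "great plot", "terrible plot"]
train_labels = [1, 0, 1, 0]

clf = Pipeline([
    ('tfidf', TfidfVectorizer(use_idf=True, ngram_range=(1, 1))),
    ('svm', SVC(kernel='linear')),
])
clf.fit(train_text, train_labels)
print(clf.predict(["good plot"]))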
Example No. 14
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {
        'SST-2': SST2Processor,
        'Reuters': ReutersProcessor,
        'IMDB': IMDBProcessor,
        'AAPD': AAPDProcessor,
        'AGNews': AGNewsProcessor,
        'Yelp2014': Yelp2014Processor,
        'Sogou': SogouProcessor,
        'Personality': PersonalityProcessor,
        'News_art': News_artProcessor,
        'News': News_Processor,
        'UCI_yelp': UCI_yelpProcessor,
        'Procon': ProconProcessor,
        'Style': StyleProcessor,
        'ProconDual': ProconDualProcessor,
        'Pan15': Pan15_Processor,
        'Pan14E': Pan14E_Processor,
        'Pan14N': Pan14N_Processor,
        'Perspectrum': PerspectrumProcessor
    }

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = (num_train_optimization_steps //
                                            torch.distributed.get_world_size())

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Install NVIDIA Apex to use distributed and FP16 training.")
        model = DDP(model)
    # elif n_gpu > 1:  (disabled by marjan)
    #     model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install NVIDIA Apex for distributed and FP16 training")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.lr,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)
    else:
        model = BertForSequenceClassification.from_pretrained(
            args.model, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)
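Examples 10, 14, and 15 all build the same two optimizer parameter groups, excluding biases and LayerNorm parameters from weight decay. A standalone sketch of that grouping with a toy module; `torch.optim.AdamW` stands in for `BertAdam`:

import torch
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

model = Toy()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=2e-5)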
Example No. 15
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = (num_train_optimization_steps //
                                            torch.distributed.get_world_size())

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, num_labels=2)  # creating news model!
    #model = BertForSequenceClassification.from_pretrained(args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    #model = BertForSequenceClassification.from_pretrained(args.model, num_labels=args.num_labels)
    model_ = torch.load(
        args.trained_model,
        map_location=lambda storage, loc: storage)  # load personality model
    state = {}
    for key in model_.state_dict().keys():
        new_key = key.replace("module.", "")
        state[new_key] = model_.state_dict()[key]

    del state['classifier.weight']  # remove the personality classifier head
    del state['classifier.bias']
    model.load_state_dict(state, strict=False)
    model = model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    print('t_total :', num_train_optimization_steps)
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.lr,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
    args.freez_bert = False
    trainer = BertTrainer(model, optimizer, processor, args)

    trainer.train()
    model = torch.load(trainer.snapshot_path)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)