Example #1
def __init__(self, config, use_gpu):
    # set up configurations
    # get pretrained word vectors
    self.pretrain = Pretrain(config['pretrain_path'])
    # set up trainer
    self.trainer = Trainer(pretrain=self.pretrain,
                           model_file=config['model_path'],
                           use_cuda=use_gpu)
    self.build_final_config(config)
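A usage note: these __init__/_set_up_model snippets all follow one pattern: load a Pretrain (serialized word vectors) from a configured path, then hand it to a Trainer along with a saved model file. A minimal construction sketch; the wrapper class name TaggerWrapper and the paths are hypothetical, only the config keys 'pretrain_path' and 'model_path' come from the snippet above.

# Hypothetical usage; only the config keys are taken from Example #1.
config = {
    'pretrain_path': 'saved_models/pos/en_ewt.pretrain.pt',  # placeholder path
    'model_path': 'saved_models/pos/en_ewt_tagger.pt',       # placeholder path
}
processor = TaggerWrapper(config, use_gpu=False)  # TaggerWrapper is a stand-in name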
Example #2
def _set_up_model(self, config, use_gpu):
    # set up trainer
    self._args = {
        'charlm_forward_file': config['forward_charlm_path'],
        'charlm_backward_file': config['backward_charlm_path']
    }
    self._pretrain = Pretrain(config['pretrain_path'])
    self._trainer = Trainer(args=self._args,
                            pretrain=self._pretrain,
                            model_file=config['model_path'],
                            use_cuda=use_gpu)
Example #3
def evaluate(args):
    # file paths
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_tagger.pt'.format(args['save_dir'], args['shorthand'])
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'],
                                               args['save_name'])

    # load pretrain
    pretrain = Pretrain(pretrain_file)

    # load model
    print("Loading model from: {}".format(model_file))
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(pretrain=pretrain,
                      model_file=model_file,
                      use_cuda=use_cuda)
    loaded_args, vocab = trainer.args, trainer.vocab

    # load config
    for k in args:
        if k.endswith('_dir') or k.endswith('_file') or k in ('shorthand', 'mode'):
            loaded_args[k] = args[k]

    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    batch = DataLoader(args['eval_file'],
                       args['batch_size'],
                       loaded_args,
                       pretrain,
                       vocab=vocab,
                       evaluation=True)

    if len(batch) > 0:
        print("Start evaluation...")
        preds = []
        for i, b in enumerate(batch):
            preds += trainer.predict(b)
    else:
        # skip eval if dev data does not exist
        preds = []

    # write to file and score
    batch.conll.set(['upos', 'xpos', 'feats'], [y for x in preds for y in x])
    batch.conll.write_conll(system_pred_file)

    if gold_file is not None:
        _, _, score = scorer.score(system_pred_file, gold_file)

        print("Tagger score:")
        print("{} {:.2f}".format(args['shorthand'], score * 100))
Example #4
def evaluate(args):
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
        else '{}/{}_lm.pt'.format(args['save_dir'], args['shorthand'])
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'],
                                               args['shorthand'])

    # load pretrain
    pretrain = Pretrain(pretrain_file)

    # load model
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(pretrain=pretrain,
                      model_file=model_file,
                      use_cuda=use_cuda)
    loaded_args, vocab = trainer.args, trainer.vocab

    # load config
    for k in args:
        if k.endswith('_dir') or k.endswith('_file') or k in ('shorthand', 'mode'):
            loaded_args[k] = args[k]

    # load data
    print("Loading data with batch size {}...".format(args['eval_batch_size']))
    batch = DataLoader(args['eval_file'],
                       args['eval_batch_size'],
                       loaded_args,
                       pretrain,
                       vocab=vocab,
                       evaluation=True)

    loss = sum([trainer.update(b, eval=True) for b in batch]) / len(batch)
    print('Test ppl = {:.6f}'.format(np.exp(loss)))

    if args['output_file'] is not None:
        preds = []
        for b in batch:
            preds += trainer.predict(b)
        with open(args['output_file'], 'w') as fout:
            for sent in preds:
                fout.write(
                    ' '.join([vocab['word'].id2unit(wid)
                              for wid in sent]) + '\n')
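The test perplexity printed above is simply exp of the mean per-batch cross-entropy loss; restated in isolation (losses stands in for the per-batch values returned by trainer.update):

import numpy as np

losses = [2.31, 2.27, 2.40]    # placeholder per-batch eval losses
ppl = np.exp(np.mean(losses))  # same quantity as np.exp(loss) above
print('Test ppl = {:.6f}'.format(ppl))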
Example #5
def train(args):
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_tagger.pt'.format(args['save_dir'], args['shorthand'])

    # load pretrained vectors
    vec_file = args['wordvec_file']
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'],
                                               args['save_name'])
    pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])

    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    train_batch = DataLoader(args['train_file'],
                             args['batch_size'],
                             args,
                             pretrain,
                             evaluation=False)
    vocab = train_batch.vocab
    dev_batch = DataLoader(args['eval_file'],
                           args['batch_size'],
                           args,
                           pretrain,
                           vocab=vocab,
                           evaluation=True)

    # pred and gold path
    system_pred_file = args['output_file']
    gold_file = args['gold_file']

    # skip training if the language does not have training or dev data
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        sys.exit(0)

    print("Training tagger...")
    trainer = Trainer(args=args,
                      vocab=vocab,
                      pretrain=pretrain,
                      use_cuda=args['cuda'])

    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'

    if args['adapt_eval_interval']:
        args['eval_interval'] = utils.get_adaptive_eval_interval(
            dev_batch.num_examples, 2000, args['eval_interval'])
        print("Evaluating the model every {} steps...".format(
            args['eval_interval']))

    using_amsgrad = False
    last_best_step = 0
    # start training
    train_loss = 0
    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False)  # update step
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
                        max_steps, loss, duration, current_lr))

            if global_step % args['eval_interval'] == 0:
                # eval on dev
                print("Evaluating on dev set...")
                dev_preds = []
                for batch in dev_batch:
                    preds = trainer.predict(batch)
                    dev_preds += preds
                dev_batch.conll.set(['upos', 'xpos', 'feats'],
                                    [y for x in dev_preds for y in x])
                dev_batch.conll.write_conll(system_pred_file)
                _, _, dev_score = scorer.score(system_pred_file, gold_file)

                train_loss = train_loss / args['eval_interval']  # avg loss per batch
                print(
                    "step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(
                        global_step, train_loss, dev_score))
                train_loss = 0

                # save best model
                if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                    last_best_step = global_step
                    trainer.save(model_file)
                    print("new best model saved.")
                    best_dev_preds = dev_preds

                dev_score_history += [dev_score]
                print("")

            if global_step - last_best_step >= args['max_steps_before_stop']:
                if not using_amsgrad:
                    print("Switching to AMSGrad")
                    last_best_step = global_step
                    using_amsgrad = True
                    trainer.optimizer = optim.Adam(trainer.model.parameters(),
                                                   amsgrad=True,
                                                   lr=args['lr'],
                                                   betas=(.9, args['beta2']),
                                                   eps=1e-6)
                else:
                    do_break = True
                    break

            if global_step >= args['max_steps']:
                do_break = True
                break

        if do_break: break

        train_batch.reshuffle()

    print("Training ended with {} steps.".format(global_step))

    best_f = max(dev_score_history) * 100
    best_eval = np.argmax(dev_score_history) + 1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(
        best_f, best_eval * args['eval_interval']))
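The stopping rule in Example #5 is two-stage: the first time the dev score fails to improve for max_steps_before_stop steps, the code rebuilds the optimizer as AMSGrad Adam instead of stopping, and only a second such plateau ends training. Note that rebuilding the optimizer also resets Adam's running moment estimates. A self-contained sketch of the swap with placeholder hyperparameters:

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)                              # placeholder model
optimizer = optim.Adam(model.parameters(), lr=3e-3)  # initial optimizer

# First plateau: replace the optimizer with the AMSGrad variant (fresh state).
optimizer = optim.Adam(model.parameters(), amsgrad=True,
                       lr=3e-3, betas=(0.9, 0.95), eps=1e-6)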
Example #6
def _set_up_model(self, config, use_gpu):
    self._pretrain = Pretrain(config['pretrain_path'])
    self._trainer = Trainer(pretrain=self._pretrain,
                            model_file=config['model_path'],
                            use_cuda=use_gpu)
Example #7
def train(args):
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
        else '{}/{}_lm.pt'.format(args['save_dir'], args['shorthand'])

    # load pretrained vectors
    vec_file = utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'],
                                               args['shorthand'])
    pretrain = Pretrain(pretrain_file, vec_file)

    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    train_batch = DataLoader(args['train_file'],
                             args['batch_size'],
                             args,
                             pretrain,
                             evaluation=False)
    vocab = train_batch.vocab
    # train_dev_batch = DataLoader(args['train_file'], args['batch_size'], args, pretrain, vocab=vocab, evaluation=True)
    dev_batch = DataLoader(args['eval_file'],
                           args['eval_batch_size'],
                           args,
                           pretrain,
                           vocab=vocab,
                           evaluation=True)

    # skip training if the language does not have training or dev data
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        sys.exit(0)

    print("Training language model...")
    trainer = Trainer(args=args,
                      vocab=vocab,
                      pretrain=pretrain,
                      use_cuda=args['cuda'])

    print()
    print('Parameters:')
    n_param = 0
    for p_name, p in trainer.model.named_parameters():
        if p.requires_grad:
            n_param += np.prod(list(p.size()))
            print('\t{:10}    {}'.format(p_name, p.size()))
    print('\tTotal parameters: {}'.format(n_param))

    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), ppl = {:.6f}, lr: {:.6f}'

    last_best_step = 0
    log_loss = 0
    train_loss = 0
    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False)  # update step
            log_loss += loss
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                log_loss /= args['log_step']
                print(
                    format_str.format(
                        datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                        global_step, max_steps, log_loss, duration,
                        np.exp(log_loss), current_lr))
                log_loss = 0

            if global_step % args['eval_interval'] == 0:
                # eval on dev
                print("Evaluating on dev set...")
                dev_loss = 0
                for batch in dev_batch:
                    dev_loss += trainer.update(batch, eval=True)
                dev_loss /= len(dev_batch)

                train_loss = train_loss / args['eval_interval']  # avg loss per batch
                print("step {}: train_ppl = {:.6f}, dev_ppl = {:.6f}".format(
                    global_step, np.exp(train_loss), np.exp(dev_loss)))
                train_loss = 0

                # save best model
                if len(dev_score_history) == 0 or dev_loss < min(dev_score_history):
                    last_best_step = global_step
                    trainer.save(model_file)
                    print("new best model saved.")
                dev_score_history.append(dev_loss)
                print()

            if global_step - last_best_step >= args['max_steps_before_stop']:
                do_break = True
                break

            if global_step >= args['max_steps']:
                do_break = True
                break

        if do_break:
            break

        train_batch.reshuffle()

    print("Training ended with {} steps.".format(global_step))

    best_ppl = np.exp(min(dev_score_history))
    best_eval = np.argmin(dev_score_history) + 1
    print("Best dev ppl = {:.2f}, at iteration = {}".format(
        best_ppl, best_eval * args['eval_interval']))
Example #8
def _set_up_model(self, config, use_gpu):
    # get pretrained word vectors
    self._pretrain = Pretrain(config['pretrain_path'])
    # set up trainer
    self._trainer = Trainer(pretrain=self._pretrain,
                            model_file=config['model_path'],
                            use_cuda=use_gpu)
Example #9
def train(args):
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_nertagger.pt'.format(args['save_dir'], args['shorthand'])

    # load pretrained vectors
    vec_file = args['wordvec_file']
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'],
                                               args['save_name'])
    pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
    """
    if len(args['wordvec_file']) == 0:
        vec_file = utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
    else:
        vec_file = args['wordvec_file']
    # do not save pretrained embeddings individually
    pretrain = Pretrain(None, vec_file, args['pretrain_max_vocab'], save_to_file=False)
    """
    if args['charlm']:
        if args['charlm_shorthand'] is None:
            print(
                "CharLM Shorthand is required for loading pretrained CharLM model..."
            )
            sys.exit(0)
        print('Use pretrained contextualized char embedding')
        args['charlm_forward_file'] = '{}/{}_forward_charlm.pt'.format(
            args['charlm_save_dir'], args['charlm_shorthand'])
        args['charlm_backward_file'] = '{}/{}_backward_charlm.pt'.format(
            args['charlm_save_dir'], args['charlm_shorthand'])

    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    train_doc = Document(json.load(open(args['train_file'])))
    train_batch = DataLoader(train_doc,
                             args['batch_size'],
                             args,
                             pretrain,
                             evaluation=False)
    vocab = train_batch.vocab
    dev_doc = Document(json.load(open(args['eval_file'])))
    dev_batch = DataLoader(dev_doc,
                           args['batch_size'],
                           args,
                           pretrain,
                           vocab=vocab,
                           evaluation=True)
    dev_gold_tags = dev_batch.tags

    # skip training if the language does not have training or dev data
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        sys.exit(0)

    print("Training tagger...")
    trainer = Trainer(args=args,
                      vocab=vocab,
                      pretrain=pretrain,
                      use_cuda=args['cuda'])
    print(trainer.model)

    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = trainer.optimizer.param_groups[0]['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'

    # LR scheduling
    if args['lr_decay'] > 0:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(trainer.optimizer, mode='max', factor=args['lr_decay'], \
            patience=args['patience'], verbose=True, min_lr=args['min_lr'])
    else:
        scheduler = None

    # start training
    train_loss = 0
    while True:
        should_stop = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False)  # update step
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
                        max_steps, loss, duration, current_lr))

            if global_step % args['eval_interval'] == 0:
                # eval on dev
                print("Evaluating on dev set...")
                dev_preds = []
                for batch in dev_batch:
                    preds = trainer.predict(batch)
                    dev_preds += preds
                _, _, dev_score = scorer.score_by_entity(
                    dev_preds, dev_gold_tags)

                train_loss = train_loss / args['eval_interval']  # avg loss per batch
                print(
                    "step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(
                        global_step, train_loss, dev_score))
                train_loss = 0

                # save best model
                if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                    trainer.save(model_file)
                    print("New best model saved.")
                    best_dev_preds = dev_preds

                dev_score_history += [dev_score]
                print("")

                # lr schedule
                if scheduler is not None:
                    scheduler.step(dev_score)

            # check stopping
            current_lr = trainer.optimizer.param_groups[0]['lr']
            if global_step >= args['max_steps'] or current_lr <= args['min_lr']:
                should_stop = True
                break

        if should_stop:
            break

        train_batch.reshuffle()

    print("Training ended with {} steps.".format(global_step))

    best_f = max(dev_score_history) * 100
    best_eval = np.argmax(dev_score_history) + 1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(
        best_f, best_eval * args['eval_interval']))
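Example #9 ties early stopping to the scheduler: training ends once ReduceLROnPlateau has decayed the learning rate down to min_lr. A self-contained sketch of that wiring with placeholder values (mode='max' because dev_score is a score, not a loss):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                                  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # placeholder optimizer
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='max', factor=0.5, patience=2, min_lr=1e-4)

for dev_score in [0.80, 0.81, 0.81, 0.81, 0.81, 0.81, 0.81]:  # fake dev scores
    scheduler.step(dev_score)  # one call per dev evaluation
    print(optimizer.param_groups[0]['lr'])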
Example #10
def train(args):
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
        else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])

    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'],
                                               args['shorthand'])
    vec_file = utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
    pretrain = Pretrain(pretrain_file, vec_file)
    use_cuda = args['cuda'] and not args['cpu']

    lm_train_batch = LMDataLoader(args['lm_file'],
                                  args['lm_batch_size'],
                                  args,
                                  pretrain,
                                  vocab=None,
                                  evaluation=False,
                                  cutoff=args['vocab_cutoff'])
    vocab = lm_train_batch.vocab
    dp_train_batch = DPDataLoader(args['train_file'],
                                  args['batch_size'],
                                  args,
                                  pretrain,
                                  vocab=None,
                                  evaluation=False,
                                  cutoff=args['vocab_cutoff'])
    vocab['deprel'] = dp_train_batch.vocab['deprel']
    dp_train_batch = DPDataLoader(args['train_file'],
                                  args['batch_size'],
                                  args,
                                  pretrain,
                                  vocab=vocab,
                                  evaluation=False,
                                  cutoff=args['vocab_cutoff'])
    train_dev_batch = DPDataLoader(args['train_file'],
                                   args['batch_size'],
                                   args,
                                   pretrain,
                                   vocab=vocab,
                                   evaluation=True)
    dev_batch = DPDataLoader(args['eval_file'],
                             args['batch_size'],
                             args,
                             pretrain,
                             vocab=vocab,
                             evaluation=True)

    lm_train_iter = iter(lm_train_batch)
    dp_train_iter = iter(dp_train_batch)

    # pred and gold path
    system_pred_file = args['output_file']
    gold_file = args['gold_file']

    print("Training parser...")
    trainer = Trainer(args=args,
                      vocab=vocab,
                      pretrain=pretrain,
                      use_cuda=use_cuda,
                      weight_decay=args['wdecay'])
    print()
    print('Parameters that require grad:')
    for p_name, p in trainer.model.named_parameters():
        if p.requires_grad:
            print('\t{:10}    {}'.format(p_name, p.size()))

    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), dp_loss = {:.4f}, ppl = {:.2f}, lr: {:.6f}'

    using_amsgrad = False
    last_best_step = 0
    # start training
    log_loss = np.zeros(3)
    train_loss = np.zeros(3)
    while True:

        try:
            lm_batch = next(lm_train_iter)
        except StopIteration:
            lm_train_iter = iter(lm_train_batch)
            lm_batch = next(lm_train_iter)
        try:
            dp_batch = next(dp_train_iter)
        except StopIteration:
            dp_train_iter = iter(dp_train_batch)
            dp_batch = next(dp_train_iter)

        start_time = time.time()
        global_step += 1
        dp_loss, lm_loss, loss = trainer.update(dp_batch, lm_batch,
                                                eval=False)  # update step
        log_loss += np.array([lm_loss, dp_loss, loss])
        train_loss += np.array([lm_loss, dp_loss, loss])
        if global_step % args['log_step'] == 0:
            duration = time.time() - start_time
            log_loss = log_loss / args['log_step']
            print(
                format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                                  global_step, max_steps,
                                  log_loss[2], duration, log_loss[1],
                                  np.exp(log_loss[0]), current_lr))
            log_loss[:] = 0

        if global_step % args['eval_interval'] == 0:
            # eval on train
            train_preds = []
            for batch in train_dev_batch:
                preds = trainer.predict(batch)
                train_preds += preds

            train_dev_batch.conll.set(['head', 'deprel'],
                                      [y for x in train_preds for y in x])
            train_dev_batch.conll.write_conll(system_pred_file)
            _, _, train_score = scorer.score(system_pred_file,
                                             args['train_file'])

            # eval on dev
            print("Evaluating on dev set...")
            dev_preds = []
            for batch in dev_batch:
                preds = trainer.predict(batch)
                dev_preds += preds

            dev_batch.conll.set(['head', 'deprel'],
                                [y for x in dev_preds for y in x])
            dev_batch.conll.write_conll(system_pred_file)
            _, _, dev_score = scorer.score(system_pred_file, gold_file)

            train_loss = train_loss / args['eval_interval']  # avg loss per batch
            print("step {}: train_score = {:.4f}, dev_score = {:.4f}".format(
                global_step, train_score, dev_score))
            train_loss[:] = 0

            # save best model
            if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                last_best_step = global_step
                trainer.save(model_file)
                print("new best model saved.")
                best_dev_preds = dev_preds

            dev_score_history += [dev_score]
            print("")

        if global_step - last_best_step >= args['max_steps_before_stop']:
            if not using_amsgrad:
                print("Switching to AMSGrad")
                last_best_step = global_step
                using_amsgrad = True
                trainer.optimizer = optim.Adam(trainer.model.parameters(),
                                               amsgrad=True,
                                               lr=args['lr'],
                                               betas=(.9, args['beta2']),
                                               eps=1e-6)
            else:
                break

        if global_step >= args['max_steps']:
            break

        # train_batch.reshuffle()

    print("Training ended with {} steps.".format(global_step))

    best_f = max(dev_score_history) * 100
    best_eval = np.argmax(dev_score_history) + 1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(
        best_f, best_eval * args['eval_interval']))
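Example #10 interleaves the LM and parser datasets by keeping one iterator per loader and restarting whichever runs out. The cycling idiom in isolation, with a hypothetical next_cycled helper and a plain list standing in for a DataLoader:

# Hypothetical helper: draw the next item, restarting an exhausted loader.
def next_cycled(it, loader):
    try:
        return next(it), it
    except StopIteration:
        it = iter(loader)
        return next(it), it

loader = [1, 2, 3]  # stand-in for a DataLoader
it = iter(loader)
for _ in range(5):
    item, it = next_cycled(it, loader)
    print(item)     # prints 1 2 3 1 2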