Example #1
# Imports assumed by this snippet; Tagger, TaggingDataset, TrainingManager,
# taste_tagger, eval_tagger and evaluate_tagger_and_writeout are project-level
# helpers from the surrounding module.
import logging
import random

import numpy as np


def main(training_file, training_dir, load_model, skip_train):
    logging.debug('Initializing random seed to 0.')
    random.seed(0)
    np.random.seed(0)

    if load_model:
        tagger = Tagger.load(load_model)
        data = TaggingDataset.load_from_file(training_file,
                                             vocab=tagger.vocab,
                                             tags=tagger.tags)
    else:
        assert not skip_train, 'Cannot --skip_train without a saved model.'
        logging.debug('Loading dataset from: %s' % training_file)
        data = TaggingDataset.load_from_file(training_file)
        logging.debug('Initializing model.')
        tagger = Tagger(data.vocab, data.tags)

    if not skip_train:
        train_data, dev_data = data.split(0.7)

        batches_train = train_data.prepare_batches(n_seqs_per_batch=10)
        batches_dev = dev_data.prepare_batches(n_seqs_per_batch=100)

        train_mgr = TrainingManager(
            avg_n_losses=len(batches_train),
            training_dir=training_dir,
            tagger_taste_fn=lambda: taste_tagger(tagger, batches_train),
            tagger_dev_eval_fn=lambda: eval_tagger(tagger, batches_dev),
            tagger_save_fn=lambda fname: tagger.save(fname))

        logging.debug('Starting training.')
        while train_mgr.should_continue():
            mb_x, mb_y = random.choice(batches_train)
            mb_loss = tagger.learn(mb_x, mb_y)

            train_mgr.tick(mb_loss=mb_loss)

    evaluate_tagger_and_writeout(tagger)
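
For completeness, `main` above is clearly meant to be driven from the command line (the assert message mentions a `--skip_train` flag). Below is a minimal sketch of a CLI wrapper; only `--skip_train` is confirmed by the snippet itself, the other flag names are assumptions that simply mirror `main()`'s parameters.

import argparse

# Hypothetical wrapper for main() above; flag names mirror the parameters.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('training_file')
    parser.add_argument('training_dir')
    parser.add_argument('--load_model', default=None,
                        help='path to a previously saved Tagger')
    parser.add_argument('--skip_train', action='store_true',
                        help='skip training; requires --load_model')
    args = parser.parse_args()
    main(args.training_file, args.training_dir, args.load_model, args.skip_train)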
Example #2
# Imports assumed by this snippet; Tagger, TaggingDataset, TrainingManager,
# taste_tagger, eval_tagger and run_tagger_and_writeout are project-level
# helpers from the surrounding module.
import logging
import random
import signal

import numpy as np
import tensorflow as tf


def main(args):
    logging.debug('Initializing random seed to 0.')
    random.seed(0)
    np.random.seed(0)
    tf.set_random_seed(0)

    logging.debug('Loading training dataset from: %s' % args.training_file)
    train_data = TaggingDataset.load_from_file(args.training_file)
    dev_data = TaggingDataset.load_from_file(None, vocab=train_data.vocab,
                                             alphabet=train_data.alphabet, tags=train_data.tags)
    logging.debug('Initializing model.')
    tagger = Tagger(train_data.vocab, train_data.tags, train_data.alphabet,
                    word_embedding_size=args.word_embedding_size,
                    char_embedding_size=args.char_embedding_size,
                    num_chars=args.max_word_length,
                    num_steps=args.max_sentence_length,
                    optimizer_desc=args.optimizer,
                    generate_lemmas=args.generate_lemmas,
                    l2=args.l2,
                    dropout_prob_values=[float(x) for x in args.dropout.split(",")],
                    experiment_name=args.exp_name,
                    supply_form_characters_to_lemma=args.supply_form_characters_to_lemma,
                    threads=args.threads,
                    use_attention=args.use_attention,
                    scheduled_sampling=args.scheduled_sampling)

    batches_train = train_data.prepare_batches(
        args.batch_size, args.max_sentence_length, args.max_word_length)
    batches_dev = dev_data.prepare_batches(
        2100, args.max_sentence_length, args.max_word_length)

    train_mgr = TrainingManager(
        len(batches_train), args.eval_interval,
        training_dir=args.training_dir,
        tagger_taste_fn=lambda: taste_tagger(tagger, batches_train),
        tagger_dev_eval_fn=lambda: eval_tagger(tagger, batches_dev),
        tagger_save_fn=lambda fname: tagger.save(fname)
    )

    # Allow an evaluation to be forced by sending SIGQUIT (Ctrl+\ in a terminal).
    force_eval = {"value": False}

    def handle_sigquit(signum, frame):
        logging.debug("Ctrl+\\ received, evaluation will be forced.")
        force_eval["value"] = True

    signal.signal(signal.SIGQUIT, handle_sigquit)

    logging.debug('Starting training.')
    try:
        permuted_batches = []
        while train_mgr.should_continue(max_epochs=args.max_epochs):
            if not permuted_batches:
                permuted_batches = batches_train[:]
                random.shuffle(permuted_batches)
            words, chars, tags, lengths, lemma_chars, chars_lengths = permuted_batches.pop()
            # Word dropout: with probability args.oov_sampling_p, replace word ids
            # that occur exactly once in the vocabulary with id 0 (presumably the
            # unknown-word slot). zeros_like keeps the integer dtype of the ids.
            oov_mask = np.vectorize(
                lambda x: train_data.vocab.count(x) == 1
                and np.random.uniform() < args.oov_sampling_p)(words)
            words = np.where(oov_mask, np.zeros_like(words), words)
            mb_loss = tagger.learn(words, chars, tags, lengths, lemma_chars, chars_lengths)

            train_mgr.tick(mb_loss=mb_loss, force_eval=force_eval["value"])
            force_eval["value"] = False
    except KeyboardInterrupt:
        logging.debug("Ctrl+C recieved, stopping training.")

    run_tagger_and_writeout(tagger, dev_data)
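
Two patterns in Example #2 are worth calling out. The SIGQUIT handler lets a running job be told to evaluate immediately (Ctrl+\ in a terminal, or `kill -QUIT <pid>`) without stopping training, while Ctrl+C (SIGINT) ends training and falls through to the final write-out. The `oov_mask` line implements word dropout: word ids seen exactly once in the vocabulary are zeroed out with probability `args.oov_sampling_p`, so the model learns a usable unknown-word embedding. Below is a self-contained toy illustration of that masking step; the batch, singleton set, and probability are made up for the demo.

import numpy as np

np.random.seed(0)
words = np.array([[5, 17, 3],
                  [17, 8, 5]])  # toy batch of word ids
singleton_ids = {3, 8}          # hypothetical ids seen exactly once in training
p = 0.5                         # stand-in for args.oov_sampling_p

# Mark singleton words for replacement with probability p ...
mask = np.vectorize(lambda x: x in singleton_ids and np.random.uniform() < p)(words)
# ... and map the marked positions to id 0, keeping the integer dtype.
print(np.where(mask, np.zeros_like(words), words))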