Code example #1
File: elmo.py Project: ymedhat95/qb
    def train(self, training_data: TrainingData) -> None:
        x_train, y_train, x_val, y_val, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data)
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class

        log.info('Batchifying data')
        train_batches = batchify(x_train, y_train, shuffle=True)
        val_batches = batchify(x_val, y_val, shuffle=False)
        self.model = ElmoModel(len(i_to_class), dropout=self.dropout)
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Parameters:\n{self.parameters()}')
        log.info(f'Model:\n{self.model}')
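        # Only the classifier head and the ELMo scalar-mix weights are handed to the
        # optimizer below, so the pretrained ELMo parameters themselves stay frozen.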
        parameters = list(self.model.classifier.parameters())
        for mix in self.model.elmo._scalar_mixes:
            parameters.extend(list(mix.parameters()))
        self.optimizer = Adam(parameters)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        patience=5,
                                                        verbose=True,
                                                        mode='max')
        temp_prefix = get_tmp_filename()
        self.model_file = f'{temp_prefix}.pt'
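        # Callbacks: log every epoch, abort on a NaN loss, stop early once the monitored
        # validation accuracy ('test_acc') has not improved for 10 epochs, cap training
        # at 100 epochs, and checkpoint the model to self.model_file keyed on 'test_acc'.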
        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            self.model_file,
                            monitor='test_acc')
        ])
        log.info('Starting training')
        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_batches)
            random.shuffle(train_batches)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_batches,
                                                            train=False)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
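
All of the excerpts on this page drive training through the same loop: `self.run_epoch` makes one pass over a data split and returns an (accuracy, loss, wall-clock time) triple, and `TrainingManager.instruct` turns the six resulting numbers into a stop-or-continue decision. The helper itself is not shown in these excerpts and its exact signature varies between examples; the following is a minimal sketch of what such a pass could look like, where the name `run_epoch_sketch`, the `(x, y)` batch format, and the internals are assumptions rather than the qb project's actual implementation.

import time

import torch


def run_epoch_sketch(model, batches, criterion, optimizer=None):
    # One pass over pre-built (x, y) tensor batches. Weights are updated only
    # when an optimizer is supplied (training); otherwise this is evaluation.
    start = time.time()
    total_loss, correct, seen = 0.0, 0, 0
    with torch.set_grad_enabled(optimizer is not None):
        for x, y in batches:
            logits = model(x)
            loss = criterion(logits, y)
            if optimizer is not None:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            total_loss += loss.item() * y.size(0)
            correct += (logits.argmax(dim=1) == y).sum().item()
            seen += y.size(0)
    seen = max(seen, 1)
    return correct / seen, total_loss / seen, time.time() - start

In the excerpts, one training pass and one validation pass per epoch feed `manager.instruct(train_time, train_loss, train_acc, test_time, test_loss, test_acc)`, which applies the callbacks (NaN check, early stopping, checkpointing, epoch cap) and reports why training should stop.
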
Code example #2
File: rnn.py Project: npow/qb
    def train(self, training_data):
        log.info('Loading Quiz Bowl dataset')
        train_iter, val_iter, dev_iter = QuizBowl.iters(
            batch_size=self.batch_size,
            lower=self.lowercase,
            use_wiki=self.use_wiki,
            n_wiki_sentences=self.n_wiki_sentences,
            replace_title_mentions=self.wiki_title_replace_token,
            sort_within_batch=True)
        log.info(f'Training Data={len(training_data[0])}')
        log.info(f'N Train={len(train_iter.dataset.examples)}')
        log.info(f'N Test={len(val_iter.dataset.examples)}')
        fields: Dict[str, Field] = train_iter.dataset.fields
        self.page_field = fields['page']
        self.n_classes = len(self.ans_to_i)
        self.qanta_id_field = fields['qanta_id']
        self.emb_dim = 300

        self.text_field = fields['text']
        log.info(f'Text Vocab={len(self.text_field.vocab)}')

        log.info('Initializing Model')
        self.model = RnnModel(self.n_classes,
                              text_field=self.text_field,
                              emb_dim=self.emb_dim,
                              n_hidden_units=self.n_hidden_units,
                              n_hidden_layers=self.n_hidden_layers,
                              nn_dropout=self.nn_dropout)
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Parameters:\n{self.parameters()}')
        log.info(f'Model:\n{self.model}')
        self.optimizer = Adam(self.model.parameters())
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        patience=5,
                                                        verbose=True,
                                                        mode='max')

        temp_prefix = get_tmp_filename()
        self.model_file = f'{temp_prefix}.pt'
        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            self.model_file,
                            monitor='test_acc')
        ])

        log.info('Starting training')

        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_iter)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_iter)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
Code example #3
    def train(self, training_data: TrainingData) -> None:

        if self.use_qb:
            x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                training_data)
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                w_x_train_text, w_train_y, _, _, _, _, _ = preprocess_dataset(
                    wiki_train_data,
                    train_size=1,
                    vocab=vocab,
                    class_to_i=class_to_i,
                    i_to_class=i_to_class)
                x_train_text.extend(w_x_train_text)
                y_train.extend(w_train_y)
        else:
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                    wiki_train_data)
            else:
                raise ValueError(
                    'use_wiki and use_qb cannot both be false, otherwise there is no training data'
                )

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab,
                                                       expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [
            convert_text_to_embeddings_indices(q, embedding_lookup)
            for q in x_train_text
        ]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [
            convert_text_to_embeddings_indices(q, embedding_lookup)
            for q in x_test_text
        ]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        n_batches_train, t_x_train, t_offset_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, t_offset_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.vocab_size = embeddings.shape[0]
        self.model = DanModel(self.vocab_size,
                              self.n_classes,
                              dropout_prob=self.dropout_prob,
                              k_softmaxes=self.k_softmaxes,
                              n_hidden_units=self.n_hidden_units,
                              non_linearity=self.non_linearity)
        log.info(f'Parameters:\n{pformat(self.parameters())}')
        log.info(f'Torch Model:\n{self.model}')
        self.model.init_weights(initial_embeddings=embeddings)
        if CUDA:
            self.model = self.model.cuda()

        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.NLLLoss()
        # self.criterion = nn.CrossEntropyLoss()
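        # nn.NLLLoss expects log-probabilities, so this DanModel presumably ends in a
        # log_softmax; nn.CrossEntropyLoss (commented out above) would take raw logits.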
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        patience=5,
                                                        verbose=True)
        tb_experiment = ' '.join(f'{param}={value}' for param, value in [
            ('model', 'dan'),
            ('n_hidden_units', self.n_hidden_units),
            ('dropout_prob', self.dropout_prob),
            ('k_softmaxes', self.k_softmaxes),
            ('non_linearity', self.non_linearity),
            ('learning_rate', self.learning_rate),
            ('batch_size', self.batch_size),
        ])

        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_loss', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            '/tmp/dan.pt',
                            monitor='test_loss'),
            Tensorboard(tb_experiment)
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(n_batches_train,
                                                               t_x_train,
                                                               t_offset_train,
                                                               t_y_train,
                                                               evaluate=False)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(n_batches_test,
                                                            t_x_test,
                                                            t_offset_test,
                                                            t_y_test,
                                                            evaluate=True)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_loss)

        log.info('Done training')
Code example #4
File: tied.py Project: Agnon1573/qb
    def train(self, training_data):
        log.info('Loading Quiz Bowl dataset')
        train_iter, val_iter, dev_iter = QuizBowl.iters(
            batch_size=self.batch_size, lower=self.lowercase,
            use_wiki=self.use_wiki, n_wiki_sentences=self.n_wiki_sentences,
            replace_title_mentions=self.wiki_title_replace_token
        )
        log.info(f'N Train={len(train_iter.dataset.examples)}')
        log.info(f'N Test={len(val_iter.dataset.examples)}')
        fields: Dict[str, Field] = train_iter.dataset.fields
        self.page_field = fields['page']
        self.n_classes = len(self.ans_to_i)
        self.qnum_field = fields['qnum']
        self.text_field = fields['text']
        self.emb_dim = self.text_field.vocab.vectors.shape[1]
        log.info(f'Vocab={len(self.text_field.vocab)}')

        log.info('Initializing Model')
        self.model = TiedModel(
            self.text_field, self.n_classes, emb_dim=self.emb_dim,
            n_hidden_units=self.n_hidden_units, n_hidden_layers=self.n_hidden_layers,
            nn_dropout=self.nn_dropout, sm_dropout=self.sm_dropout
        )
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Parameters:\n{self.parameters()}')
        log.info(f'Model:\n{self.model}')
        self.optimizer = Adam(self.model.parameters(), lr=self.lr)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=5, verbose=True, mode='max')

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(), EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100), ModelCheckpoint(create_save_model(self.model), '/tmp/tied.pt', monitor='test_acc')
        ])

        log.info('Starting training')
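        # Optional experiment tracking: if the kuro package is installed, register a
        # trial and report per-epoch metrics to it; otherwise fall back to log-only output.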
        try:
            import socket
            from kuro import Worker
            worker = Worker(socket.gethostname())
            experiment = worker.experiment(
                'guesser', 'Tied', hyper_parameters=conf['guessers']['Tied'],
                metrics=[
                    'train_acc', 'train_loss', 'test_acc', 'test_loss'
                ], n_trials=5
            )
            trial = experiment.trial()
            if trial is not None:
                self.kuro_trial_id = trial.id
        except ModuleNotFoundError:
            trial = None

        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_iter)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_iter)

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if trial is not None:
                trial.report_metric('test_acc', test_acc, step=epoch)
                trial.report_metric('test_loss', test_loss, step=epoch)
                trial.report_metric('train_acc', train_acc, step=epoch)
                trial.report_metric('train_loss', train_loss, step=epoch)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
Code example #5
File: dan.py Project: amit2014/qb
    def train(self, training_data: TrainingData) -> None:
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data
        )

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        n_batches_train, t_x_train, t_offset_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, t_offset_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.model = DanModel(embeddings.shape[0], self.n_classes)
        self.model.init_weights(initial_embeddings=embeddings)
        self.model.cuda()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1), MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model), '/tmp/dan.pt', monitor='test_acc')
            # Tensorboard('dan', log_dir='tb-logs')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(
                n_batches_train,
                t_x_train, t_offset_train, t_y_train, evaluate=False
            )

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(
                n_batches_test,
                t_x_test, t_offset_test, t_y_test, evaluate=True
            )

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break

        log.info('Done training')
Code example #6
    def train(self, training_data):
        log.info("Loading Quiz Bowl dataset")
        train_iter, val_iter, dev_iter = QuizBowl.iters(
            batch_size=self.batch_size,
            lower=self.lowercase,
            use_wiki=self.use_wiki,
            n_wiki_sentences=self.n_wiki_sentences,
            replace_title_mentions=self.wiki_title_replace_token,
            combined_ngrams=self.combined_ngrams,
            unigrams=self.unigrams,
            bigrams=self.bigrams,
            trigrams=self.trigrams,
            combined_max_vocab_size=self.combined_max_vocab_size,
            unigram_max_vocab_size=self.unigram_max_vocab_size,
            bigram_max_vocab_size=self.bigram_max_vocab_size,
            trigram_max_vocab_size=self.trigram_max_vocab_size,
        )
        log.info(f"N Train={len(train_iter.dataset.examples)}")
        log.info(f"N Test={len(val_iter.dataset.examples)}")
        fields: Dict[str, Field] = train_iter.dataset.fields
        self.page_field = fields["page"]
        self.n_classes = len(self.ans_to_i)
        self.qanta_id_field = fields["qanta_id"]
        self.emb_dim = 300

        if "text" in fields:
            self.text_field = fields["text"]
            log.info(f"Text Vocab={len(self.text_field.vocab)}")
        if "unigram" in fields:
            self.unigram_field = fields["unigram"]
            log.info(f"Unigram Vocab={len(self.unigram_field.vocab)}")
        if "bigram" in fields:
            self.bigram_field = fields["bigram"]
            log.info(f"Bigram Vocab={len(self.bigram_field.vocab)}")
        if "trigram" in fields:
            self.trigram_field = fields["trigram"]
            log.info(f"Trigram Vocab={len(self.trigram_field.vocab)}")

        log.info("Initializing Model")
        self.model = DanModel(
            self.n_classes,
            text_field=self.text_field,
            unigram_field=self.unigram_field,
            bigram_field=self.bigram_field,
            trigram_field=self.trigram_field,
            emb_dim=self.emb_dim,
            n_hidden_units=self.n_hidden_units,
            n_hidden_layers=self.n_hidden_layers,
            nn_dropout=self.nn_dropout,
            pooling=self.pooling,
        )
        if CUDA:
            self.model = self.model.cuda()
        log.info(f"Parameters:\n{self.parameters()}")
        log.info(f"Model:\n{self.model}")
        self.optimizer = Adam(self.model.parameters())
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        patience=5,
                                                        verbose=True,
                                                        mode="max")

        temp_prefix = get_tmp_filename()
        self.model_file = f"{temp_prefix}.pt"
        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor="test_acc", patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            self.model_file,
                            monitor="test_acc"),
        ])

        log.info("Starting training")

        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_iter)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_iter)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(" ".join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
Code example #7
    def train(self, training_data: TrainingData):
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data)

        if self.use_wiki:
            wiki_dataset = FilteredWikipediaDataset()
            wiki_train_data = wiki_dataset.training_data()
            w_x_train_text, w_train_y, _, _, _, _, _ = preprocess_dataset(
                wiki_train_data,
                train_size=1,
                vocab=vocab,
                class_to_i=class_to_i,
                i_to_class=i_to_class)
            x_train_text.extend(w_x_train_text)
            y_train.extend(w_train_y)

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab,
                                                       expand_glove=True,
                                                       mask_zero=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [
            convert_text_to_embeddings_indices(q,
                                               embedding_lookup,
                                               random_unk_prob=.05)
            for q in x_train_text
        ]
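        # random_unk_prob=.05 above presumably maps ~5% of tokens to the UNK index as a
        # light regularizer; note that the same setting is applied to the test split below.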
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [
            convert_text_to_embeddings_indices(q,
                                               embedding_lookup,
                                               random_unk_prob=.05)
            for q in x_test_text
        ]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        n_batches_train, t_x_train, lengths_train, t_y_train, _ = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, lengths_test, t_y_test, _ = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.model = RnnModel(embeddings.shape[0], self.n_classes)
        self.model.init_weights(embeddings=embeddings)
        self.model.cuda()
        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        'max',
                                                        patience=5,
                                                        verbose=True)

        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            '/tmp/rnn.pt',
                            monitor='test_acc')
            #Tensorboard('rnn', log_dir='tb-logs')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(n_batches_train,
                                                               t_x_train,
                                                               lengths_train,
                                                               t_y_train,
                                                               evaluate=False)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(n_batches_test,
                                                            t_x_test,
                                                            lengths_test,
                                                            t_y_test,
                                                            evaluate=True)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)

        log.info('Done training')
Code example #8
File: dan.py Project: nadesai/qb
    def train(self, training_data: TrainingData) -> None:

        if self.use_qb:
            x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                training_data
            )
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                w_x_train_text, w_train_y, _, _, _, _, _ = preprocess_dataset(
                    wiki_train_data, train_size=1, vocab=vocab, class_to_i=class_to_i, i_to_class=i_to_class
                )
                x_train_text.extend(w_x_train_text)
                y_train.extend(w_train_y)
        else:
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                    wiki_train_data
                )
            else:
                raise ValueError('use_wiki and use_qb cannot both be false, otherwise there is no training data')

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        log.info(f'Batching: {len(x_train)} train questions and {len(x_test)} test questions')

        n_batches_train, t_x_train, t_offset_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, t_offset_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.vocab_size = embeddings.shape[0]
        self.model = DanModel(self.vocab_size, self.n_classes, embeddings=embeddings)
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Model:\n{self.model}')

        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=5, verbose=True, mode='max')

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1), MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model), '/tmp/dan.pt', monitor='test_acc')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(
                n_batches_train,
                t_x_train, t_offset_train, t_y_train, evaluate=False
            )

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(
                n_batches_test,
                t_x_test, t_offset_test, t_y_test, evaluate=True
            )

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)

        log.info('Done training')
Code example #9
File: rnn_entity.py Project: amit2014/qb
    def train(self, training_data: TrainingData):
        log.info('Preprocessing the dataset')
        self.nlp = spacy.load('en', create_pipeline=custom_spacy_pipeline)
        x_train_tokens, y_train, x_test_tokens, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            self.nlp, training_data)

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        log.info('Loading word embeddings')
        word_embeddings, multi_embedding_lookup = load_multi_embeddings(
            multi_vocab=vocab)
        self.word_embeddings = word_embeddings
        self.multi_embedding_lookup = multi_embedding_lookup

        log.info('Batching the dataset')
        train_dataset = BatchedDataset(self.batch_size, multi_embedding_lookup,
                                       x_train_tokens, y_train)
        test_dataset = BatchedDataset(self.batch_size, multi_embedding_lookup,
                                      x_test_tokens, y_test)
        self.n_classes = compute_n_classes(training_data[1])

        log.info('Initializing neural model')
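        # The multi-embedding lookup carries separate vocabularies for words, POS tags,
        # IOB tags, and entity types; their sizes parameterize RnnEntityModel below.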
        self.model = RnnEntityModel(len(multi_embedding_lookup.word),
                                    len(multi_embedding_lookup.pos),
                                    len(multi_embedding_lookup.iob),
                                    len(multi_embedding_lookup.ent_type),
                                    self.n_classes)
        self.model.init_weights(word_embeddings=word_embeddings)
        self.model.cuda()
        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        'max',
                                                        patience=5,
                                                        verbose=True)

        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            '/tmp/rnn_entity.pt',
                            monitor='test_acc')
            #Tensorboard('rnn_entity', log_dir='tb-logs')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_dataset,
                                                               evaluate=False)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(test_dataset,
                                                            evaluate=True)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)

        log.info('Done training')
Code example #10
File: bcn.py Project: nadesai/qb
    def train(self, training_data) -> None:
        log.info('Preprocessing data')
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data
        )
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True, mask_zero=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        log.info('Batching data')
        n_batches_train, t_x_train, lengths_train, masks_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True
        )
        n_batches_test, t_x_test, lengths_test, masks_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False, shuffle=False
        )

        self.n_classes = compute_n_classes(training_data[1])

        log.info('Creating model')
        self.model = BCN(
            300, 500, embeddings.shape[0], self.n_classes,
            We=torch.from_numpy(embeddings)
        ).cuda()
        self.optimizer = Adam(self.model.parameters())
        self.criterion = nn.NLLLoss()

        log.info(f'Model:\n{self.model}')

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1), MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model), '/tmp/bcn.pt', monitor='test_acc'),
            Tensorboard('bcn')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(
                n_batches_train,
                t_x_train, lengths_train, masks_train, t_y_train, evaluate=False
            )

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(
                n_batches_test,
                t_x_test, lengths_test, masks_test, t_y_test, evaluate=True
            )

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break