Code example #1
File: cnn.py  Project: amit2014/qb
    def train(self, training_data: TrainingData) -> None:
        log.info('Preprocessing training data...')
        x_train, y_train, x_test, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data, create_runs=self.train_on_q_runs, full_question=self.train_on_full_q)
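        # Optionally mix Wikipedia pages into the training set when wiki_data_frac is positive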
        if self.wiki_data_frac > 0:
            log.info('Using wikipedia with fraction: {}'.format(self.wiki_data_frac))
            wiki_data = FilteredWikipediaDataset().training_data()
            results = preprocess_dataset(
                wiki_data,
                train_size=1,
                vocab=vocab,
                class_to_i=class_to_i,
                i_to_class=i_to_class)
            x_train.extend(results[0])
            y_train.extend(results[1])

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        log.info('Creating embeddings...')
        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=self.expand_we, mask_zero=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        log.info('Converting dataset to embeddings...')
        x_train = [nn.convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train]
        x_test = [nn.convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test]
        self.n_classes = nn.compute_n_classes(training_data[1])
        self.max_len = nn.compute_max_len(training_data)
        x_train = np.array(nn.tf_format(x_train, self.max_len, 0))
        x_test = np.array(nn.tf_format(x_test, self.max_len, 0))

        log.info('Building keras model...')
        self.model = self.build_model()

        log.info('Training model...')
        callbacks = [
            TensorBoard(),
            EarlyStopping(patience=self.max_patience, monitor='val_sparse_categorical_accuracy'),
            ModelCheckpoint(
                safe_path(CNN_MODEL_TMP_TARGET),
                save_best_only=True,
                monitor='val_sparse_categorical_accuracy'
            )
        ]
        if self.decay_lr_on_plateau:
            callbacks.append(ReduceLROnPlateau(monitor='val_sparse_categorical_accuracy', factor=.5, patience=5))
        history = self.model.fit(
            x_train, y_train,
            validation_data=(x_test, y_test),
            batch_size=self.batch_size, epochs=self.max_n_epochs,
            callbacks=callbacks, verbose=2
        )
        self.history = history.history
        log.info('Done training')
Code example #2
File: dan_tf.py  Project: xxlatgh/qb
    def train(self,
              training_data: TrainingData) -> None:
        log.info('Preprocessing training data...')
        x_train, y_train, _, x_test, y_test, _, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data)
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        if self.use_wiki:
            wiki_training_data = WikipediaDataset(self.min_answers).training_data()
            x_train_wiki, y_train_wiki, _, _, _, _, _, _, _ = preprocess_dataset(
                wiki_training_data, train_size=1, vocab=vocab, class_to_i=class_to_i,
                i_to_class=i_to_class)

        log.info('Creating embeddings...')
        embeddings, embedding_lookup = _load_embeddings(vocab=vocab)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        log.info('Converting dataset to embeddings...')
        x_train = [_convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train]
        x_train_lengths = _compute_lengths(x_train)

        x_test = [_convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test]
        x_test_lengths = _compute_lengths(x_test)

        if self.use_wiki:
            x_train_wiki = [_convert_text_to_embeddings_indices(q, embedding_lookup)
                            for q in x_train_wiki]
            x_train_lengths_wiki = _compute_lengths(x_train_wiki)
            x_train.extend(x_train_wiki)
            y_train.extend(y_train_wiki)
            x_train_lengths = np.concatenate([x_train_lengths, x_train_lengths_wiki])

        log.info('Computing number of classes and max paragraph length in words')
        self.n_classes = _compute_n_classes(training_data[1])
        self.max_len = _compute_max_len(x_train)
        x_train = _tf_format(x_train, self.max_len, embeddings.shape[0])
        x_test = _tf_format(x_test, self.max_len, embeddings.shape[0])

        log.info('Training deep model...')
        self.model = TFDanModel(self.dan_params, self.max_len, self.n_classes)
        x_train = np.array(x_train)
        y_train = np.array(y_train)
        x_test = np.array(x_test)
        y_test = np.array(y_test)
        train_losses, train_accuracies, holdout_losses, holdout_accuracies = self.model.train(
            x_train, y_train, x_train_lengths, x_test, y_test, x_test_lengths)
Code example #3
File: memory.py  Project: amit2014/qb
    def train(self, training_data):
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data
        )
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

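        # Convert question text to embedding indices, falling back to the UNK token for empty rows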
        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for row in x_train:
            if len(row) == 0:
                row.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        mems_train = load_memories([' '.join(x) for x in x_train_text], self.n_memories)
        mems_indices_train = memories_to_indices(mems_train, embedding_lookup)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for row in x_test:
            if len(row) == 0:
                row.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        mems_test = load_memories([' '.join(x) for x in x_test_text], self.n_memories)
        mem_indices_test = memories_to_indices(mems_test, embedding_lookup)

        self.n_classes = compute_n_classes(training_data[1])
Code example #4
def get_quizbowl():
    qb_dataset = QuizBowlDataset(guesser_train=True, buzzer_train=False)
    training_data = qb_dataset.training_data()
    train_x, train_y, dev_x, dev_y, i_to_word, class_to_i, i_to_class = preprocess_dataset(
        training_data)
    i_to_word = ['<unk>', '<eos>'] + sorted(i_to_word)
    word_to_i = {x: i for i, x in enumerate(i_to_word)}
    train = transform_to_array(zip(train_x, train_y), word_to_i)
    dev = transform_to_array(zip(dev_x, dev_y), word_to_i)
    return train, dev, word_to_i, i_to_class
Code example #5
File: elmo.py  Project: ymedhat95/qb
    def train(self, training_data: TrainingData) -> None:
        x_train, y_train, x_val, y_val, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data)
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class

        log.info('Batchifying data')
        train_batches = batchify(x_train, y_train, shuffle=True)
        val_batches = batchify(x_val, y_val, shuffle=False)
        self.model = ElmoModel(len(i_to_class), dropout=self.dropout)
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Parameters:\n{self.parameters()}')
        log.info(f'Model:\n{self.model}')
        parameters = list(self.model.classifier.parameters())
        for mix in self.model.elmo._scalar_mixes:
            parameters.extend(list(mix.parameters()))
        self.optimizer = Adam(parameters)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        patience=5,
                                                        verbose=True,
                                                        mode='max')
        temp_prefix = get_tmp_filename()
        self.model_file = f'{temp_prefix}.pt'
        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            self.model_file,
                            monitor='test_acc')
        ])
        log.info('Starting training')
        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_batches)
            random.shuffle(train_batches)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_batches,
                                                            train=False)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
Code example #6
File: elmo.py  Project: Pinafore/qb
    def train(self, training_data: TrainingData) -> None:
        x_train, y_train, x_val, y_val, vocab, class_to_i, i_to_class = preprocess_dataset(training_data)
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class

        log.info('Batchifying data')
        train_batches = batchify(x_train, y_train, shuffle=True)
        val_batches = batchify(x_val, y_val, shuffle=False)
        self.model = ElmoModel(len(i_to_class), dropout=self.dropout)
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Parameters:\n{self.parameters()}')
        log.info(f'Model:\n{self.model}')
        parameters = list(self.model.classifier.parameters())
        for mix in self.model.elmo._scalar_mixes:
            parameters.extend(list(mix.parameters()))
        self.optimizer = Adam(parameters)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=5, verbose=True, mode='max')
        temp_prefix = get_tmp_filename()
        self.model_file = f'{temp_prefix}.pt'
        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(), EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100), ModelCheckpoint(create_save_model(self.model), self.model_file, monitor='test_acc')
        ])
        log.info('Starting training')
        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_batches)
            random.shuffle(train_batches)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_batches, train=False)

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
Code example #7
File: aux_dan.py  Project: xxlatgh/qb
    def train(self, training_data: TrainingData) -> None:
        log.info('Preprocessing training data...')
        x_train, y_train, properties_train, x_test, y_test, properties_test, vocab,\
            class_to_i, i_to_class = preprocess_dataset(training_data)

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        self.ans_type_i_to_class, self.ans_type_class_to_i = compute_ans_type_classes(
            properties_train)
        self.n_ans_type_classes = len(self.ans_type_class_to_i)
        compute_ans_type_classes(properties_test)

        self.category_i_to_class, self.category_class_to_i = compute_category_classes(
            properties_train)
        self.n_category_classes = len(self.category_class_to_i)
        compute_category_classes(properties_test)

        self.gender_i_to_class, self.gender_class_to_i = compute_gender_classes(
            properties_train)
        self.n_gender_classes = len(self.gender_class_to_i)
        compute_gender_classes(properties_test)

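        # Encode the auxiliary answer-type, category, and gender properties as integer label arrays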
        ans_type_labels_train = np.array([
            self.ans_type_class_to_i[prop['ans_type']]
            for prop in properties_train
        ])
        category_labels_train = np.array([
            self.category_class_to_i[prop['category']]
            for prop in properties_train
        ])
        gender_labels_train = np.array([
            self.gender_class_to_i[prop['gender']] for prop in properties_train
        ])

        ans_type_labels_test = np.array([
            self.ans_type_class_to_i[prop['ans_type']]
            for prop in properties_test
        ])
        category_labels_test = np.array([
            self.category_class_to_i[prop['category']]
            for prop in properties_test
        ])
        gender_labels_test = np.array([
            self.gender_class_to_i[prop['gender']] for prop in properties_test
        ])

        if self.use_wiki:
            wiki_training_data = WikipediaDataset(
                self.min_answers).training_data()
            x_train_wiki, y_train_wiki, _, _, _, _, _, _, _ = preprocess_dataset(
                wiki_training_data,
                train_size=1,
                vocab=vocab,
                class_to_i=class_to_i,
                i_to_class=i_to_class)

        log.info('Creating embeddings...')
        embeddings, embedding_lookup = _load_embeddings(vocab=vocab)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        log.info('Converting dataset to embeddings...')
        x_train = [
            convert_text_to_embeddings_indices(q, embedding_lookup)
            for q in x_train
        ]
        x_train_lengths = compute_lengths(x_train)

        x_test = [
            convert_text_to_embeddings_indices(q, embedding_lookup)
            for q in x_test
        ]
        x_test_lengths = compute_lengths(x_test)

        log.info(
            'Computing number of classes and max paragraph length in words')
        self.n_classes = compute_n_classes(training_data[1])
        self.max_len = compute_max_len(x_train)
        x_train = tf_format(x_train, self.max_len, embeddings.shape[0])
        x_test = tf_format(x_test, self.max_len, embeddings.shape[0])

        log.info('Training deep model...')
        self.model = AuxDanModel(self.dan_params, self.max_len, self.n_classes,
                                 self.n_ans_type_classes,
                                 self.n_gender_classes,
                                 self.n_category_classes)
        x_train = np.array(x_train)
        y_train = np.array(y_train)
        x_test = np.array(x_test)
        y_test = np.array(y_test)
        property_labels_train = {
            'ans_type': ans_type_labels_train,
            'category': category_labels_train,
            'gender': gender_labels_train
        }

        property_labels_test = {
            'ans_type': ans_type_labels_test,
            'category': category_labels_test,
            'gender': gender_labels_test
        }
        train_losses, train_accuracies, holdout_losses, holdout_accuracies = self.model.train(
            x_train, y_train, x_train_lengths, property_labels_train, x_test,
            y_test, x_test_lengths, property_labels_test)
Code example #8
    def train(self, training_data: TrainingData) -> None:

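        # Build the training text from quiz bowl questions, Wikipedia pages, or both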
        if self.use_qb:
            x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                training_data)
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                w_x_train_text, w_train_y, _, _, _, _, _ = preprocess_dataset(
                    wiki_train_data,
                    train_size=1,
                    vocab=vocab,
                    class_to_i=class_to_i,
                    i_to_class=i_to_class)
                x_train_text.extend(w_x_train_text)
                y_train.extend(w_train_y)
        else:
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                    wiki_train_data)
            else:
                raise ValueError(
                    'use_wiki and use_qb cannot both be false, otherwise there is no training data'
                )

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab,
                                                       expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [
            convert_text_to_embeddings_indices(q, embedding_lookup)
            for q in x_train_text
        ]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [
            convert_text_to_embeddings_indices(q, embedding_lookup)
            for q in x_test_text
        ]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        n_batches_train, t_x_train, t_offset_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, t_offset_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.vocab_size = embeddings.shape[0]
        self.model = DanModel(self.vocab_size,
                              self.n_classes,
                              dropout_prob=self.dropout_prob,
                              k_softmaxes=self.k_softmaxes,
                              n_hidden_units=self.n_hidden_units,
                              non_linearity=self.non_linearity)
        log.info(f'Parameters:\n{pformat(self.parameters())}')
        log.info(f'Torch Model:\n{self.model}')
        self.model.init_weights(initial_embeddings=embeddings)
        if CUDA:
            self.model = self.model.cuda()

        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.NLLLoss()
        # self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        patience=5,
                                                        verbose=True)
        tb_experiment = ' '.join(f'{param}={value}' for param, value in [
            ('model', 'dan'),
            ('n_hidden_units', self.n_hidden_units),
            ('dropout_prob', self.dropout_prob),
            ('k_softmaxes', self.k_softmaxes),
            ('non_linearity', self.non_linearity),
            ('learning_rate', self.learning_rate),
            ('batch_size', self.batch_size),
        ])

        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_loss', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            '/tmp/dan.pt',
                            monitor='test_loss'),
            Tensorboard(tb_experiment)
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(n_batches_train,
                                                               t_x_train,
                                                               t_offset_train,
                                                               t_y_train,
                                                               evaluate=False)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(n_batches_test,
                                                            t_x_test,
                                                            t_offset_test,
                                                            t_y_test,
                                                            evaluate=True)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_loss)

        log.info('Done training')
Code example #9
File: dan.py  Project: amit2014/qb
    def train(self, training_data: TrainingData) -> None:
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data
        )

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        n_batches_train, t_x_train, t_offset_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, t_offset_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.model = DanModel(embeddings.shape[0], self.n_classes)
        self.model.init_weights(initial_embeddings=embeddings)
        self.model.cuda()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1), MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model), '/tmp/dan.pt', monitor='test_acc')
            # Tensorboard('dan', log_dir='tb-logs')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(
                n_batches_train,
                t_x_train, t_offset_train, t_y_train, evaluate=False
            )

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(
                n_batches_test,
                t_x_test, t_offset_test, t_y_test, evaluate=True
            )

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break

        log.info('Done training')
Code example #10
    def train(self, training_data: TrainingData):
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data)

        if self.use_wiki:
            wiki_dataset = FilteredWikipediaDataset()
            wiki_train_data = wiki_dataset.training_data()
            w_x_train_text, w_train_y, _, _, _, _, _ = preprocess_dataset(
                wiki_train_data,
                train_size=1,
                vocab=vocab,
                class_to_i=class_to_i,
                i_to_class=i_to_class)
            x_train_text.extend(w_x_train_text)
            y_train.extend(w_train_y)

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab,
                                                       expand_glove=True,
                                                       mask_zero=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

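        # Convert text to embedding indices; random_unk_prob appears to map a small
        # fraction of tokens to UNK at random as a form of regularization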
        x_train = [
            convert_text_to_embeddings_indices(q,
                                               embedding_lookup,
                                               random_unk_prob=.05)
            for q in x_train_text
        ]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [
            convert_text_to_embeddings_indices(q,
                                               embedding_lookup,
                                               random_unk_prob=.05)
            for q in x_test_text
        ]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        n_batches_train, t_x_train, lengths_train, t_y_train, _ = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, lengths_test, t_y_test, _ = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.model = RnnModel(embeddings.shape[0], self.n_classes)
        self.model.init_weights(embeddings=embeddings)
        self.model.cuda()
        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                        'max',
                                                        patience=5,
                                                        verbose=True)

        manager = TrainingManager([
            BaseLogger(log_func=log.info),
            TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model),
                            '/tmp/rnn.pt',
                            monitor='test_acc')
            #Tensorboard('rnn', log_dir='tb-logs')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(n_batches_train,
                                                               t_x_train,
                                                               lengths_train,
                                                               t_y_train,
                                                               evaluate=False)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(n_batches_test,
                                                            t_x_test,
                                                            lengths_test,
                                                            t_y_test,
                                                            evaluate=True)

            stop_training, reasons = manager.instruct(train_time, train_loss,
                                                      train_acc, test_time,
                                                      test_loss, test_acc)

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)

        log.info('Done training')
Code example #11
File: dan.py  Project: nadesai/qb
    def train(self, training_data: TrainingData) -> None:

        if self.use_qb:
            x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                training_data
            )
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                w_x_train_text, w_train_y, _, _, _, _, _ = preprocess_dataset(
                    wiki_train_data, train_size=1, vocab=vocab, class_to_i=class_to_i, i_to_class=i_to_class
                )
                x_train_text.extend(w_x_train_text)
                y_train.extend(w_train_y)
        else:
            if self.use_wiki:
                wiki_dataset = WikipediaDataset(set(training_data[1]))
                wiki_train_data = wiki_dataset.training_data()
                x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
                    wiki_train_data
                )
            else:
                raise ValueError('use_wiki and use_qb cannot both be false, otherwise there is no training data')

        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        self.n_classes = compute_n_classes(training_data[1])

        log.info(f'Batching: {len(x_train)} train questions and {len(x_test)} test questions')

        n_batches_train, t_x_train, t_offset_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True)
        n_batches_test, t_x_test, t_offset_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False)

        self.vocab_size = embeddings.shape[0]
        self.model = DanModel(self.vocab_size, self.n_classes, embeddings=embeddings)
        if CUDA:
            self.model = self.model.cuda()
        log.info(f'Model:\n{self.model}')

        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=5, verbose=True, mode='max')

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1), MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model), '/tmp/dan.pt', monitor='test_acc')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(
                n_batches_train,
                t_x_train, t_offset_train, t_y_train, evaluate=False
            )

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(
                n_batches_test,
                t_x_test, t_offset_test, t_y_test, evaluate=True
            )

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)

        log.info('Done training')
Code example #12
File: bcn.py  Project: nadesai/qb
    def train(self, training_data) -> None:
        log.info('Preprocessing data')
        x_train_text, y_train, x_test_text, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
            training_data
        )
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class
        self.vocab = vocab

        embeddings, embedding_lookup = load_embeddings(vocab=vocab, expand_glove=True, mask_zero=True)
        self.embeddings = embeddings
        self.embedding_lookup = embedding_lookup

        x_train = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_train_text]
        for r in x_train:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        x_test = [convert_text_to_embeddings_indices(q, embedding_lookup) for q in x_test_text]
        for r in x_test:
            if len(r) == 0:
                r.append(embedding_lookup['UNK'])
        x_test = np.array(x_test)
        y_test = np.array(y_test)

        log.info('Batching data')
        n_batches_train, t_x_train, lengths_train, masks_train, t_y_train = batchify(
            self.batch_size, x_train, y_train, truncate=True
        )
        n_batches_test, t_x_test, lengths_test, masks_test, t_y_test = batchify(
            self.batch_size, x_test, y_test, truncate=False, shuffle=False
        )

        self.n_classes = compute_n_classes(training_data[1])

        log.info('Creating model')
        self.model = BCN(
            300, 500, embeddings.shape[0], self.n_classes,
            We=torch.from_numpy(embeddings)
        ).cuda()
        self.optimizer = Adam(self.model.parameters())
        self.criterion = nn.NLLLoss()

        log.info(f'Model:\n{self.model}')

        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(),
            EarlyStopping(monitor='test_acc', patience=10, verbose=1), MaxEpochStopping(100),
            ModelCheckpoint(create_save_model(self.model), '/tmp/bcn.pt', monitor='test_acc'),
            Tensorboard('bcn')
        ])

        log.info('Starting training...')
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(
                n_batches_train,
                t_x_train, lengths_train, masks_train, t_y_train, evaluate=False
            )

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(
                n_batches_test,
                t_x_test, lengths_test, masks_test, t_y_test, evaluate=True
            )

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break