    def score(self, qa_pairs, model, data, task=None):
        input_ids_lst = []
        input_mask_lst = []
        segment_ids_lst = []
        label_ids_lst = []

        for (q, a, label) in qa_pairs:
            # features are cached per (question, answer) object identity, so they are
            # only reused while the same q/a objects stay alive
            cache_id = '{}-{}'.format(id(q), id(a))
            example_features = self._cache.get(cache_id)

            if example_features is None:
                example = InputExample(q.metadata['id'], q.text, a.text, label=label)
                example_features = convert_examples_to_features(
                    [example], self.length_question + self.length_answer,
                    model.tokenizer, self.logger)[0]
                self._cache[cache_id] = example_features

            input_ids_lst.append(example_features.input_ids)
            input_mask_lst.append(example_features.input_mask)
            segment_ids_lst.append(example_features.segment_ids)
            label_ids_lst.append(example_features.label_id)

        input_ids = torch.tensor(input_ids_lst, dtype=torch.long).to(model.device)
        input_mask = torch.tensor(input_mask_lst, dtype=torch.long).to(model.device)
        segment_ids = torch.tensor(segment_ids_lst, dtype=torch.long).to(model.device)
        label_ids = torch.tensor(label_ids_lst, dtype=torch.float).to(model.device)

        with torch.no_grad():
            scores = model.bert(input_ids, segment_ids, input_mask, label_ids, tasks=task)
            # loss is not computed in this variant; kept for reference:
            # loss_fct = nn.BCELoss()
            # loss = loss_fct(scores, label_ids.view(-1, 1))

        return scores.squeeze(dim=1).cpu().numpy(), None
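All of the snippets below rely on the same InputExample / feature objects and on convert_examples_to_features. The following is only a minimal, hypothetical sketch of the interfaces they appear to assume, reconstructed from how the snippets use them (field and parameter names come from that usage, not from the original module), so the examples are easier to read on their own.

class InputExample:
    """One (question, answer) pair as the snippets construct it."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid = guid      # e.g. question.metadata['id']
        self.text_a = text_a  # question text
        self.text_b = text_b  # answer text (may be omitted)
        self.label = label    # 1.0 = relevant, 0.0 = not relevant


class InputFeatures:
    """Tokenized features as consumed by model.bert in the snippets."""
    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id


# convert_examples_to_features(examples, max_seq_length, tokenizer, logger,
# show_example=True) is assumed to return one InputFeatures per InputExample,
# padded/truncated to self.length_question + self.length_answer tokens.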
Example #2
    def get_next_batch(self, model, data):
        self.cur_dataset = (self.cur_dataset + 1) % len(self.train_questions)
        n_positive = self.batchsize // self.n_train_answers
        example_index = self.get_batch_indices(self.cur_dataset, n_positive)

        batch_examples = []
        for i in example_index:
            unrelated_answers = [
                InputExample(
                    self.train_questions[self.cur_dataset][i].metadata['id'],
                    self.train_questions[self.cur_dataset][i].text,
                    a.text,
                    label=0.0) for a in self.get_random_answers(
                        self.cur_dataset, self.n_train_answers - 1)
            ]
            unrelated_answers_features = convert_examples_to_features(
                unrelated_answers,
                self.length_question + self.length_answer,
                model.tokenizer,
                self.logger,
                show_example=False)
            batch_examples += [
                self.train_positive_instances[self.cur_dataset][i]
            ] + unrelated_answers_features

        self.batch_i += n_positive
        return batch_examples
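A small, hypothetical walk-through of the batch layout this produces (made-up sizes): each selected question contributes its positive instance followed by n_train_answers - 1 random negatives, so consecutive slices of length n_train_answers belong to the same question.

# Hypothetical sizes, for illustration only.
batchsize = 8
n_train_answers = 4

n_positive = batchsize // n_train_answers  # -> 2 questions per batch

# Resulting layout of batch_examples:
#   [q0 positive, q0 neg, q0 neg, q0 neg,
#    q1 positive, q1 neg, q1 neg, q1 neg]
for q in range(n_positive):
    positive_index = q * n_train_answers
    negative_indices = list(range(positive_index + 1, (q + 1) * n_train_answers))
    print(positive_index, negative_indices)  # -> 0 [1, 2, 3]  then  4 [5, 6, 7]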
Example #3
    def prepare_training(self, model, data):
        super(BertTrainingPointwiseAllQSameBatch,
              self).prepare_training(model, data)

        # create features for all training examples
        for i, dataset in enumerate(data.datas_train):
            dataset_examples = []
            for pool in dataset.archive.train.qa:
                for gt in pool.ground_truth:
                    self.train_questions[i].append(pool.question)
                    dataset_examples.append(
                        InputExample(pool.question.metadata['id'],
                                     pool.question.text,
                                     gt.text,
                                     label=1.0))
            self.train_positive_instances[i] = convert_examples_to_features(
                dataset_examples, self.length_question + self.length_answer,
                model.tokenizer, self.logger)

        # calculate number of batches
        min_examples = min([len(v) for v in self.train_questions.values()])
        n_datasets = len(self.train_questions)
        if self.epoch_max_examples is None or min_examples * n_datasets < self.epoch_max_examples:
            examples_per_epoch = min_examples * n_datasets
        else:
            examples_per_epoch = self.epoch_max_examples
        self.n_batches = examples_per_epoch
Example #4
    def prepare_training(self, model, data):
        super(BertTrainingPointwiseWithNegativePoolsAllQSameBatch,
              self).prepare_training(model, data)

        # create features for all training examples
        for i, dataset in enumerate(data.datas_train):
            dataset_batches = []
            for pool in dataset.archive.train.qa:
                batch = []
                for a in pool.pooled_answers:
                    batch.append(
                        InputExample(
                            pool.question.metadata['id'],
                            pool.question.text,
                            a.text,
                            label=1.0 if a in pool.ground_truth else 0.0))
                dataset_batches.append(
                    convert_examples_to_features(batch,
                                                 self.length_question +
                                                 self.length_answer,
                                                 model.tokenizer,
                                                 self.logger,
                                                 show_example=False))
            self.batches[i] = dataset_batches

        # calculate number of batches
        min_batches = min([len(v) for v in self.batches.values()])
        self.n_batches = min_batches * len(self.batches)
Example #5
    def prepare_training(self, model, data):
        super(BertTrainingPointwiseWithNegativePools,
              self).prepare_training(model, data)

        # create features for all training examples
        for i, dataset in enumerate(data.datas_train):
            dataset_examples = []
            for pool in dataset.archive.train.qa:
                for a in pool.ground_truth:
                    input_a, input_b = self.potentially_flip_inputs(
                        pool.question.text, a.text)
                    dataset_examples.append(
                        InputExample(pool.question.metadata['id'],
                                     input_a,
                                     input_b,
                                     label=1.0))

                neg = []

                pot_neg_answer_list = pool.pooled_answers
                if self.negative_sample_from_top_n is not False:
                    pot_neg_answer_list = list(
                        pool.pooled_answers[:self.negative_sample_from_top_n])
                    random.shuffle(pot_neg_answer_list)

                for a in pot_neg_answer_list:
                    if a in pool.ground_truth:
                        continue

                    input_a, input_b = self.potentially_flip_inputs(
                        pool.question.text, a.text)
                    neg.append(
                        InputExample(pool.question.metadata['id'],
                                     input_a,
                                     input_b,
                                     label=0.0))
                    if len(neg) >= self.max_negative_per_answer * len(
                            pool.ground_truth):
                        break
                dataset_examples += neg

            self.train_instances[i] = convert_examples_to_features(
                dataset_examples, self.length_question + self.length_answer,
                model.tokenizer, self.logger)

        # calculate number of batches
        min_examples = min([len(v) for v in self.train_instances.values()])
        n_datasets = len(self.train_instances)
        if self.epoch_max_examples is None or min_examples * n_datasets < self.epoch_max_examples:
            examples_per_epoch = min_examples * n_datasets
        else:
            examples_per_epoch = self.epoch_max_examples
        self.n_batches = math.ceil(examples_per_epoch / float(self.batchsize))
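The negative-selection policy above can be read as a small standalone routine. The sketch below is a hypothetical rephrasing of that loop (the function name and plain-string answers are placeholders for illustration): optionally restrict to the top-n pooled answers and shuffle within that slice, skip ground-truth answers, and stop once max_negative_per_answer negatives per ground-truth answer have been collected.

import random

def select_negatives(pooled_answers, ground_truth, max_negative_per_answer,
                     negative_sample_from_top_n=False):
    """Hypothetical sketch of the negative selection done in the loop above."""
    candidates = pooled_answers
    if negative_sample_from_top_n is not False:
        # restrict to the top-n candidates, then shuffle within that slice
        candidates = list(pooled_answers[:negative_sample_from_top_n])
        random.shuffle(candidates)

    negatives = []
    for a in candidates:
        if a in ground_truth:
            continue
        negatives.append(a)
        if len(negatives) >= max_negative_per_answer * len(ground_truth):
            break
    return negatives

# e.g. select_negatives(['a1', 'a2', 'a3', 'a4'], ['a2'], max_negative_per_answer=2)
# keeps at most two of 'a1', 'a3', 'a4'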
Example #6
    def prepare_training(self, model, data):
        examples = []
        for i, dataset in enumerate(data.datas_train):
            for pool in dataset.archive.train.qa:
                for gt in pool.ground_truth:
                    self.train_questions.append(pool.question)
                    self.train_dataset_ids.append(i)
                    examples.append(
                        InputExample(pool.question.metadata['id'], pool.question.text, gt.text, label=1.0)
                    )
        self.train_positive_instances = convert_examples_to_features(
            examples, self.length_question + self.length_answer, model.tokenizer, self.logger
        )
Example #7
    def get_next_batch(self, model, data):
        batchsize_half = int(self.batchsize / 2)
        indices = self.epoch_random_indices[self.batch_i * batchsize_half: (self.batch_i + 1) * batchsize_half]

        batch_examples = []
        prediction_examples = []

        for i in indices:
            batch_examples.append(self.train_positive_instances[i])

            prediction_examples += [InputExample(self.train_questions[i].metadata['id'],
                                                 self.train_questions[i].text,
                                                 next(self.random_pools[self.train_dataset_ids[i]]).text,
                                                 label=0.0) for _ in range(self.n_train_answers)]

        prediction_examples = convert_examples_to_features(
            prediction_examples, self.length_question + self.length_answer, model.tokenizer, self.logger
        )

        # negative sampling with the network is only run if we choose among more
        # than one candidate negative answer
        if self.n_train_answers > 1:
            prediction_results = []
            model.bert.eval()
            for predict_batch in range(int(math.ceil(len(prediction_examples) / float(self.batchsize_neg_ranking)))):
                batch_start_idx = predict_batch * self.batchsize_neg_ranking
                predict_batch_examples = prediction_examples[
                                         batch_start_idx: batch_start_idx + self.batchsize_neg_ranking]

                input_ids = torch.tensor([f.input_ids for f in predict_batch_examples], dtype=torch.long).to(
                    model.device)
                input_mask = torch.tensor([f.input_mask for f in predict_batch_examples], dtype=torch.long).to(
                    model.device)
                segment_ids = torch.tensor([f.segment_ids for f in predict_batch_examples], dtype=torch.long).to(
                    model.device)

                with torch.no_grad():
                    scores = model.bert(input_ids, segment_ids, input_mask)
                    prediction_results += scores.squeeze(dim=1).tolist()

            for count, i in enumerate(indices):
                predictions = prediction_results[self.n_train_answers * count:self.n_train_answers * (count + 1)]
                incorrect_example = prediction_examples[np.argmax(predictions) + self.n_train_answers * count]
                batch_examples.append(incorrect_example)
        else:
            batch_examples += prediction_examples

        self.batch_i += 1
        return batch_examples
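The hard-negative step at the end can be illustrated in isolation: the scores for each question's n_train_answers candidate negatives sit in a contiguous slice of prediction_results, and the candidate the model scores highest (the one it mistakes most for a correct answer) is kept. A tiny numeric sketch with made-up scores:

import numpy as np

n_train_answers = 3
# Hypothetical scores for 2 questions x 3 candidate negatives each.
prediction_results = [0.10, 0.80, 0.30,   # question 0
                      0.55, 0.20, 0.40]   # question 1

for count in range(2):
    predictions = prediction_results[n_train_answers * count:
                                     n_train_answers * (count + 1)]
    hard_negative_index = np.argmax(predictions) + n_train_answers * count
    print(count, hard_negative_index)  # -> 0 1  then  1 3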
Example #8
    def _get_mean_adapter(self, model, pool):
        question = [InputExample("", pool.question.text, label=1.0)]

        features = convert_examples_to_features(
            question, self.length_question + self.length_answer,
            model.tokenizer, self.logger)
        input_ids = torch.tensor([f.input_ids for f in features],
                                 dtype=torch.long).to(model.device)
        input_mask = torch.tensor([f.input_mask for f in features],
                                  dtype=torch.long).to(model.device)
        segment_ids = torch.tensor([f.segment_ids for f in features],
                                   dtype=torch.long).to(model.device)

        with torch.no_grad():
            q_vector = model.bert.average_standard_bert_output(
                input_ids, segment_ids, input_mask).cpu().numpy()
            q_vector = np.squeeze(q_vector / norm(q_vector))
        # only cosine similarity is handled; any other setting would leave
        # `similarity` undefined below
        if self.mean_adapter_similarity == "cosine":
            similarity = 1 - np.dot(self.vectors, q_vector)
        task = self.tasks[np.argmin(similarity)]
        return task
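Since both self.vectors and q_vector are L2-normalized, 1 - np.dot(self.vectors, q_vector) is the cosine distance from the question to each task's mean vector, and argmin picks the closest task. A tiny hypothetical sketch with made-up vectors:

import numpy as np
from numpy.linalg import norm

# Hypothetical per-task mean vectors (one row per task) and task names.
vectors = np.array([[1.0, 0.0], [0.0, 1.0]])
vectors = vectors / norm(vectors, axis=1, keepdims=True)
tasks = ["task_a", "task_b"]

q_vector = np.array([0.9, 0.1])
q_vector = q_vector / norm(q_vector)

similarity = 1 - np.dot(vectors, q_vector)  # cosine distance per task
print(tasks[np.argmin(similarity)])         # -> task_a (closest mean vector)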
Example #9
    def score(self, qa_pairs, model, data):
        eval_examples = [
            InputExample(q.metadata['id'], q.text, a.text, label=label)
            for (q, a, label) in qa_pairs
        ]

        eval_features = convert_examples_to_features(
            eval_examples, self.length_question + self.length_answer,
            model.tokenizer, self.logger)
        input_ids = torch.tensor([f.input_ids for f in eval_features],
                                 dtype=torch.long).to(model.device)
        input_mask = torch.tensor([f.input_mask for f in eval_features],
                                  dtype=torch.long).to(model.device)
        segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                   dtype=torch.long).to(model.device)
        label_ids = torch.tensor([f.label_id for f in eval_features],
                                 dtype=torch.float).to(model.device)

        with torch.no_grad():
            scores, loss = model.bert(input_ids, segment_ids, input_mask,
                                      label_ids)

        return scores.squeeze(dim=1).cpu().numpy(), loss.cpu().numpy()