Example #1
    def write_prediction(self, config, test_set):
        """Performs an epoch of evaluation

        Args:
            config: (Config) with batch_size and dir_answers
            test_set:(Dataset) instance

        Returns:
            files: (list) of path to files
            perp: (float) perplexity on test set

        """
        # initialize containers of references and predictions
        if self._config.decoding == "greedy":
            refs, hyps = [], [[]]
        elif self._config.decoding == "beam_search":
            refs, hyps = [], [[] for _ in range(self._config.beam_size)]
        else:
            raise ValueError("Unknown decoding: {}".format(
                    self._config.decoding))

        # iterate over the dataset
        n_words, ce_words = 0, 0  # sum of ce for all words + nb of words
        for img, formula in minibatches(test_set, config.batch_size):
            fd = self._get_feed_dict(img,
                                     training=False,
                                     formula=formula,
                                     dropout=1)
            ce_words_eval, n_words_eval, ids_eval = self.sess.run(
                [self.ce_words, self.n_words, self.pred_test.ids],
                feed_dict=fd)
            # TODO(guillaume): move this logic into tf graph
            if self._config.decoding == "greedy":
                # (batch, time) -> (batch, 1, time): one hypothesis per image
                ids_eval = np.expand_dims(ids_eval, axis=1)
            elif self._config.decoding == "beam_search":
                # (batch, time, beam) -> (batch, beam, time)
                ids_eval = np.transpose(ids_eval, [0, 2, 1])
            n_words += n_words_eval
            ce_words += ce_words_eval
            # print("---------------------------------------------------------------formula and prediction :")
            for form, preds in zip(formula, ids_eval):
                refs.append(form)
                # print(form, "    ----------    ", preds[0])
                for i, pred in enumerate(preds):
                    hyps[i].append(pred)

        files = write_answers(refs, hyps, self._vocab.id_to_tok,
                              config.dir_answers, self._vocab.id_end)

        # perplexity = exp(mean per-word cross-entropy)
        perp = np.exp(ce_words / float(n_words))

        return files, perp
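
A minimal calling sketch for this method. Every name below other than `write_prediction` itself is an assumption for illustration: the snippet only shows that the enclosing class holds `self.sess`, `self._config`, and `self._vocab`, so the `Img2SeqModel` class, the `Config`, `Vocab`, and `Dataset` constructors, and `restore_session` are all hypothetical.

    # hypothetical setup; adapt the constructors to the actual project
    config = Config("model.json")
    vocab = Vocab(config)
    test_set = Dataset(path_formulas="test.formulas", dir_images="test_images/")

    model = Img2SeqModel(config, dir_output="results/", vocab=vocab)
    model.restore_session("model.weights/")  # load trained parameters

    files, perp = model.write_prediction(config, test_set)
    print("answer files: {}".format(files))
    print("test perplexity: {:.2f}".format(perp))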
Example #2
    def write_prediction(self, config, test_set):
        """Performs an epoch of evaluation

        Args:
            config: (Config) with batch_size and dir_answers
            test_set:(Dataset) instance

        Returns:
            files: (list) of path to files
            perp: (float) perplexity on test set

        """
        # initialize containers of references and predictions
        if self._config.decoding == "greedy":
            refs, hyps = [], [[]]
        elif self._config.decoding == "beam_search":
            refs, hyps = [], [[] for _ in range(self._config.beam_size)]
        else:
            raise ValueError("Unknown decoding: {}".format(
                    self._config.decoding))

        nbatches = (len(test_set) + config.batch_size - 1) // config.batch_size
        prog = Progbar(nbatches)
        n_words, ce_words = 0, 0  # sum of ce for all words + nb of words
        for i, (img, formula) in enumerate(
                minibatches(test_set, config.batch_size)):
            fd = self._get_feed_dict(img, formula=formula, dropout=1)
            ce_words_eval, n_words_eval, ids_eval = self.sess.run(
                [self.ce_words, self.n_words, self.pred_test.ids],
                feed_dict=fd)

            if self._config.decoding == "greedy":
                ids_eval = np.expand_dims(ids_eval, axis=1)
            elif self._config.decoding == "beam_search":
                ids_eval = np.transpose(ids_eval, [0, 2, 1])
            n_words += n_words_eval
            ce_words += ce_words_eval

            for form, preds in zip(formula, ids_eval):
                refs.append(form)
                for j, pred in enumerate(preds):
                    hyps[j].append(pred)

            prog.update(i + 1,
                        [("perplexity", np.exp(ce_words / float(n_words)))])

        files = write_answers(refs, hyps, self._vocab.id_to_tok,
                              config.dir_answers, self._vocab.id_end)

        # perplexity = exp(mean per-word cross-entropy)
        perp = np.exp(ce_words / float(n_words))

        return files, perp
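
The running "perplexity" fed to Progbar is recomputed each batch from the accumulated totals. A self-contained sketch of that arithmetic (the numbers are made up):

    import numpy as np

    # accumulated over the batches seen so far
    ce_words = 240.0  # summed cross-entropy over all target words, in nats
    n_words = 100     # number of target words

    # perplexity = exp(mean per-word cross-entropy)
    perp = np.exp(ce_words / float(n_words))
    print(perp)  # exp(2.4) ~= 11.02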
Example #3
    def _run_evaluate_epoch(self, config, test_set):
        """Performs an epoch of evaluation
        Args:
            test_set: Dataset instance
            params: (dict) with extra params in it
                - "dir_name": (string)
        Returns:
            scores: (dict) scores["acc"] = 0.85 for instance
        """
        self.model.eval()
        self.encoder.eval()
        self.decoder.eval()
        # initialize containers of references and predictions
        if self._config.decoding == "greedy":
            refs, hyps = [], [[]]
        elif self._config.decoding == "beam_search":
            refs, hyps = [], [[] for _ in range(self._config.beam_size)]
        else:
            raise ValueError("Unknown decoding: {}".format(
                    self._config.decoding))
        with torch.no_grad():
            nbatches = (len(test_set) + config.batch_size - 1) // config.batch_size
            prog = Progbar(nbatches)

            for i, (img, formula) in enumerate(
                    minibatches(test_set, config.batch_size)):
                # Move to GPU, if available
                img = pad_batch_images_2(img)
                img = torch.FloatTensor(img)  # (N, W, H, C)
                formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
                img = img.permute(0, 3, 1, 2)  # (N, C, W, H)
                formula = torch.LongTensor(formula)  # (N, T)
                img = img.to(self.device)
                formula = formula.to(self.device)

                # Forward prop.
                imgs = self.encoder(img)
                # pass the true (unpadded) formula lengths computed above
                scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(
                    imgs, formula, torch.LongTensor(formula_length).unsqueeze(1))

                # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
                targets = caps_sorted[:, 1:]

                # Remove timesteps that we didn't decode at, or are pads
                # pack_padded_sequence is an easy trick to do this
                scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
                targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)

                # Calculate loss
                loss = self.criterion(scores, targets)

                alpha_c = 1.
                # Add doubly stochastic attention regularization
                loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()

                loss_eval = loss.item()

                prog.update(i + 1, [("loss", loss_eval), ("perplexity", np.exp(loss_eval))])

                # store the reference formula and the decoder outputs; note
                # that `scores` holds raw logits here, not token ids
                for form, preds in zip(formula, scores):
                    refs.append(form)
                    for j, pred in enumerate(preds):
                        hyps[j].append(pred)

            files = write_answers(refs, hyps, self._vocab.id_to_tok,
                                  config.dir_answers, self._vocab.id_end)
            scores = score_files(files[0], files[1])
            scores["perplexity"] = np.exp(loss.item())

        self.logger.info("- Evaluating: {}".format(prog.info))

        return scores
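
Unlike Examples #1 and #2, this variant appends raw decoder logits to `hyps` rather than token ids, so `write_answers` would receive score vectors instead of vocabulary indices. A hedged sketch of the conversion step that would likely be needed, assuming the unpacked `scores` tensor has shape (batch, time, vocab_size); the helper name is made up:

    import torch

    def logits_to_token_ids(scores):
        """Greedy decode: keep the highest-scoring token at each timestep.

        Assumes `scores` is a (batch, time, vocab_size) tensor of decoder
        logits; returns a (batch, time) LongTensor of token ids suitable
        for the `hyps` lists that `write_answers` consumes.
        """
        return scores.argmax(dim=-1)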