Example #1
    def predict(
        self,
        samples: List[Dict[str, str]],
        cuda: bool = False,
        show_progress: bool = False
    ) -> Tuple[List[Dict[str, Union[str, float]]], List[float]]:
        """ Function that runs a model prediction,
        
        :param samples: List of dictionaries with 'mt' and 'ref' keys.
        :param cuda: Flag that runs inference using 1 single GPU.
        :param show_progress: Flag to show progress during inference of multiple examples.
        
        :return: Dictionary with model outputs
        """
        if self.training:
            self.eval()

        if cuda and torch.cuda.is_available():
            self.to("cuda")

        with torch.no_grad():
            batches = [
                samples[i:i + self.hparams.batch_size]
                for i in range(0, len(samples), self.hparams.batch_size)
            ]
            model_inputs = []
            if show_progress:
                pbar = tqdm(total=len(batches),
                            desc="Preparing batches....",
                            dynamic_ncols=True)
            for batch in batches:
                model_inputs.append(self.prepare_sample(batch, inference=True))
                if show_progress:
                    pbar.update(1)

            if show_progress:
                pbar.close()

            if show_progress:
                pbar = tqdm(total=len(batches),
                            desc="Scoring hypothesis...",
                            dynamic_ncols=True)

            distance_weighted, distance_src, distance_ref = [], [], []
            for k, model_input in enumerate(model_inputs):
                src_input, mt_input, ref_input, alt_input = model_input
                if cuda and torch.cuda.is_available():
                    src_embeddings = self.get_sentence_embedding(
                        **move_to_cuda(src_input))
                    mt_embeddings = self.get_sentence_embedding(
                        **move_to_cuda(mt_input))
                    ref_embeddings = self.get_sentence_embedding(
                        **move_to_cuda(ref_input))
                    ref_distances = F.pairwise_distance(
                        mt_embeddings, ref_embeddings).cpu()
                    src_distances = F.pairwise_distance(
                        mt_embeddings, src_embeddings).cpu()

                    # When two references are given, the distance to the reference
                    # is the minimum over both references.
                    if alt_input is not None:
                        alt_embeddings = self.get_sentence_embedding(
                            **move_to_cuda(alt_input))
                        alt_distances = F.pairwise_distance(
                            mt_embeddings, alt_embeddings).cpu()
                        ref_distances = torch.stack(
                            [ref_distances, alt_distances])
                        ref_distances = ref_distances.min(dim=0).values

                else:
                    src_embeddings = self.get_sentence_embedding(**src_input)
                    mt_embeddings = self.get_sentence_embedding(**mt_input)
                    ref_embeddings = self.get_sentence_embedding(**ref_input)
                    ref_distances = F.pairwise_distance(
                        mt_embeddings, ref_embeddings)
                    src_distances = F.pairwise_distance(
                        mt_embeddings, src_embeddings)
                    # Mirror the GPU branch: keep the minimum distance over both references.
                    if alt_input is not None:
                        alt_embeddings = self.get_sentence_embedding(**alt_input)
                        alt_distances = F.pairwise_distance(
                            mt_embeddings, alt_embeddings)
                        ref_distances = torch.stack(
                            [ref_distances, alt_distances])
                        ref_distances = ref_distances.min(dim=0).values

                # Harmonic mean between the distances:
                distances = (2 * ref_distances *
                             src_distances) / (ref_distances + src_distances)
                src_distances = src_distances.numpy().tolist()
                ref_distances = ref_distances.numpy().tolist()
                distances = distances.numpy().tolist()

                for i in range(len(distances)):
                    distance_weighted.append(1 / (1 + distances[i]))
                    distance_src.append(1 / (1 + src_distances[i]))
                    distance_ref.append(1 / (1 + ref_distances[i]))

                if show_progress:
                    pbar.update(1)

            if show_progress:
                pbar.close()

        scores = []
        for i in range(len(samples)):
            samples[i]["langid"] = {
                "src": self.langid(samples[i]["src"]),
                "mt": self.langid(samples[i]["mt"]),
                "ref": self.langid(samples[i]["ref"]),
            }
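            # When the MT output shares the source language but differs from the
            # reference language (likely an untranslated copy), rely on the
            # reference distance alone; the source distance would reward copying.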
            if (samples[i]["langid"]["src"] == samples[i]["langid"]["mt"] and
                    samples[i]["langid"]["mt"] != samples[i]["langid"]["ref"]):
                scores.append(distance_ref[i])
            else:
                scores.append(distance_weighted[i])

            samples[i]["predicted_score"] = scores[-1]
            samples[i]["reference_distance"] = distance_ref[i]
            samples[i]["source_distance"] = distance_src[i]

        return samples, scores
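
The scoring arithmetic above reduces to two steps: a harmonic mean of the
reference and source distances, followed by a 1/(1 + d) mapping into (0, 1].
A minimal, self-contained sketch with toy tensors standing in for real
sentence embeddings (all values are illustrative):

import torch
import torch.nn.functional as F

# Toy stand-ins for get_sentence_embedding outputs: 2 sentences, 4-dim embeddings.
mt_emb = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]])
ref_emb = torch.tensor([[1.0, 0.1, 0.0, 0.0], [0.0, 0.9, 0.1, 0.0]])
src_emb = torch.tensor([[0.8, 0.0, 0.2, 0.0], [0.0, 1.0, 0.0, 0.3]])

ref_d = F.pairwise_distance(mt_emb, ref_emb)  # Euclidean distance to reference
src_d = F.pairwise_distance(mt_emb, src_emb)  # Euclidean distance to source

# Harmonic mean of the two distances, as in predict() above.
combined = (2 * ref_d * src_d) / (ref_d + src_d)

# Map unbounded distances to similarity scores in (0, 1]: identical
# embeddings give 1.0, larger distances approach 0.
scores = 1 / (1 + combined)
print(scores)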
Example #2
    def predict(
        self,
        samples: List[Dict[str, str]],
        cuda: bool = False,
        show_progress: bool = False,
    ) -> Tuple[List[Dict[str, Union[str, float]]], List[float]]:
        """Function that runs a model prediction,
        
        :param samples: List of dictionaries with 'mt' and 'ref' keys.
        :param cuda: Flag that runs inference using 1 single GPU.
        :param show_progress: Flag to show progress during inference of multiple examples.

        :return: Dictionary with original samples, predicted scores and langid results for SRC and MT 
            + list of predicted scores
        """
        if self.training:
            self.eval()

        if cuda and torch.cuda.is_available():
            self.to("cuda")

        with torch.no_grad():
            batches = [
                samples[i:i + self.hparams.batch_size]
                for i in range(0, len(samples), self.hparams.batch_size)
            ]
            model_inputs = []
            if show_progress:
                pbar = tqdm(
                    total=len(batches),
                    desc="Preparing batches...",
                    dynamic_ncols=True,
                    leave=None,
                )
            for batch in batches:
                batch = self.prepare_sample(batch, inference=True)
                model_inputs.append(batch)
                if show_progress:
                    pbar.update(1)

            if show_progress:
                pbar.close()

            if show_progress:
                pbar = tqdm(
                    total=len(batches),
                    desc="Scoring hypothesis...",
                    dynamic_ncols=True,
                    leave=None,
                )
            scores = []
            for model_input in model_inputs:
                if cuda and torch.cuda.is_available():
                    model_input = move_to_cuda(model_input)
                    model_out = self.forward(**model_input)
                    model_out = move_to_cpu(model_out)
                else:
                    model_out = self.forward(**model_input)

                model_scores = model_out["score"].numpy().tolist()
                for i in range(len(model_scores)):
                    scores.append(model_scores[i][0])

                if show_progress:
                    pbar.update(1)

            if show_progress:
                pbar.close()

        assert len(scores) == len(samples)
        return samples, scores
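
The model_scores[i][0] indexing above assumes forward() returns a dict whose
"score" entry is a (batch_size, 1) tensor. A small runnable sketch of that
flattening, using a made-up tensor in place of real model output:

import torch

# Made-up stand-in for forward() output: a (batch_size, 1) score tensor.
model_out = {"score": torch.tensor([[0.82], [0.47], [0.91]])}

# .numpy().tolist() yields a list of 1-element rows: [[0.82], [0.47], [0.91]].
model_scores = model_out["score"].numpy().tolist()

# Taking index [0] of each row flattens it into plain floats.
scores = [row[0] for row in model_scores]
print(scores)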
Example #3
    def document_predict(
        self,
        documents: List[Dict[str, List[str]]],
        cuda: bool = False,
        show_progress: bool = False,
    ) -> Tuple[List[Dict[str, List[str]]], List[float], List[float]]:
        """Function that scores entire documents by processing all segments in parallel.

        :param documents: List of dictionaries with 'mt', 'src' and 'ref' keys where each key is
            a list of segments.
        :param cuda: Flag that runs inference using a single GPU.
        :param show_progress: Flag to show progress during inference of multiple examples.

        :return: Tuple with the original documents (extended with the predicted
            document score), the micro-averaged scores and the macro-averaged scores.
        """
        if self.training:
            self.eval()

        if cuda and torch.cuda.is_available():
            self.to("cuda")

        inputs, lengths = [], []
        for d in documents:
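            # Turn a dict of parallel segment lists into a list of per-segment
            # dicts: {"src": [...], "mt": [...]} -> [{"src": ..., "mt": ...}, ...]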
            d = [dict(zip(d, t)) for t in zip(*d.values())]
            # For very long documents we need to create chunks.
            # (64 sentences per chunk)
            if len(d) > 64:
                document_chunks, document_lengths = [], []
                chunks = [d[i:i + 64] for i in range(0, len(d), 64)]
                for chunk in chunks:
                    chunk = self.prepare_sample(chunk, inference=True)
                    document_lengths.append(chunk["mt_lengths"])
                    if cuda and torch.cuda.is_available():
                        chunk = move_to_cuda(chunk)
                    document_chunks.append(chunk)
                lengths.append(torch.cat(document_lengths, dim=0))
                inputs.append(document_chunks)
            else:
                d_input = self.prepare_sample(d, inference=True)
                lengths.append(d_input["mt_lengths"])
                if cuda and torch.cuda.is_available():
                    d_input = move_to_cuda(d_input)
                inputs.append(d_input)

        micro_average, average = [], []
        for doc, seg_lengths in tqdm(
                zip(inputs, lengths),
                total=len(inputs),
                desc="Scoring documents...",
                dynamic_ncols=True,
                leave=None,
                disable=not show_progress,
        ):
            if isinstance(doc, list):
                seg_scores = []
                for chunk in doc:
                    # Chunks were already moved to GPU above when cuda is set.
                    model_output = self.forward(**chunk)
                    seg_scores.append(
                        move_to_cpu(model_output)["score"].view(1, -1)[0])
                seg_scores = torch.cat(seg_scores, dim=0)
            else:
                model_output = self.forward(**doc)
                seg_scores = move_to_cpu(model_output)["score"].view(1, -1)[0]

            # Invert segment-level scores for HTER
            # seg_scores = torch.ones_like(seg_scores) -  seg_scores
            micro = (seg_scores * seg_lengths).sum() / seg_lengths.sum()
            macro = seg_scores.sum() / seg_scores.size()[0]
            micro_average.append(micro.item())
            average.append(macro.item())

        assert len(micro_average) == len(documents)
        for i in range(len(documents)):
            documents[i]["predicted_score"] = micro_average[i]

        return documents, micro_average, average
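
The micro/macro distinction above is easy to verify by hand. A self-contained
sketch with toy scores and segment lengths (all values are illustrative):

import torch

# Toy segment-level scores and MT segment lengths for one document.
seg_scores = torch.tensor([0.9, 0.5, 0.7])
seg_lengths = torch.tensor([10.0, 30.0, 20.0])

# Micro average: weight each segment by its length, so longer segments dominate.
micro = (seg_scores * seg_lengths).sum() / seg_lengths.sum()  # (9 + 15 + 14) / 60 ≈ 0.633

# Macro average: every segment counts equally.
macro = seg_scores.sum() / seg_scores.size(0)  # 2.1 / 3 = 0.7

print(micro.item(), macro.item())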
Example #4
    def predict(
        self,
        samples: List[Dict[str, str]],
        cuda: bool = False,
        show_progress: bool = False
    ) -> Tuple[List[Dict[str, Union[str, float]]], List[float]]:
        """Function that runs a model prediction,
        
        :param samples: List of dictionaries with 'mt' and 'ref' keys.
        :param cuda: Flag that runs inference using 1 single GPU.
        :param show_progress: Flag to show progress during inference of multiple examples.

        :return: Dictionary with original samples + predicted scores and list of predicted scores
        """
        if self.training:
            self.eval()

        if cuda and torch.cuda.is_available():
            self.to("cuda")

        with torch.no_grad():
            batches = [
                samples[i:i + self.hparams.batch_size]
                for i in range(0, len(samples), self.hparams.batch_size)
            ]
            model_inputs = []
            if show_progress:
                pbar = tqdm(
                    total=len(batches),
                    desc="Preparing batches...",
                    dynamic_ncols=True,
                    leave=None,
                )
            for batch in batches:
                model_inputs.append(self.prepare_sample(batch, inference=True))
                if show_progress:
                    pbar.update(1)

            if show_progress:
                pbar.close()

            if show_progress:
                pbar = tqdm(
                    total=len(batches),
                    desc="Scoring hypothesis...",
                    dynamic_ncols=True,
                    leave=None,
                )
            scores = []
            for model_input in model_inputs:
                mt_input, ref_input = model_input
                if cuda and torch.cuda.is_available():
                    mt_embeddings = self.get_sentence_embedding(
                        **move_to_cuda(mt_input))
                    ref_embeddings = self.get_sentence_embedding(
                        **move_to_cuda(ref_input))
                    distances = F.pairwise_distance(mt_embeddings,
                                                    ref_embeddings).cpu()
                else:
                    mt_embeddings = self.get_sentence_embedding(**mt_input)
                    ref_embeddings = self.get_sentence_embedding(**ref_input)
                    distances = F.pairwise_distance(mt_embeddings,
                                                    ref_embeddings)

                distances = distances.numpy().tolist()
                for i in range(len(distances)):
                    scores.append(1 / (1 + distances[i]))

                if show_progress:
                    pbar.update(1)

            if show_progress:
                pbar.close()

        assert len(scores) == len(samples)
        for i in range(len(scores)):
            samples[i]["predicted_score"] = scores[i]
        return samples, scores
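
Finally, a hedged end-to-end usage sketch for this reference-only variant.
load_model and the checkpoint path are hypothetical placeholders for whatever
loading helper the surrounding codebase provides; they are not defined by the
snippets above:

# Hypothetical usage: load_model and the checkpoint path are placeholders,
# not APIs defined by the snippets above.
model = load_model("path/to/checkpoint.ckpt")

samples = [
    {"mt": "A cat sleeps.", "ref": "A cat is sleeping."},
    {"mt": "Good morning!", "ref": "Good morning!"},
]

samples, scores = model.predict(samples, cuda=True, show_progress=True)
for sample, score in zip(samples, scores):
    print(f"{score:.4f}\t{sample['mt']}")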