Example #1
    def test_mean_absolute_error_computation(self, device: str):
        mae = MeanAbsoluteError()
        predictions = torch.tensor(
            [[1.0, 1.5, 1.0], [2.0, 3.0, 3.5], [4.0, 5.0, 5.5], [6.0, 7.0, 7.5]], device=device
        )
        targets = torch.tensor(
            [[0.0, 1.0, 0.0], [2.0, 2.0, 0.0], [4.0, 5.0, 0.0], [7.0, 7.0, 0.0]], device=device
        )
        mae(predictions, targets)
        assert mae.get_metric() == 21.0 / 12.0

        mask = torch.tensor(
            [[True, True, False], [True, True, False], [True, True, False], [True, True, False]],
            device=device,
        )
        mae(predictions, targets, mask)
        assert mae.get_metric() == (21.0 + 3.5) / (12.0 + 8.0)

        new_targets = torch.tensor(
            [[2.0, 2.0, 0.0], [0.0, 1.0, 0.0], [7.0, 7.0, 0.0], [4.0, 5.0, 0.0]], device=device
        )
        mae(predictions, new_targets)
        assert mae.get_metric() == (21.0 + 3.5 + 32.0) / (12.0 + 8.0 + 12.0)

        mae.reset()
        mae(predictions, new_targets)
        assert mae.get_metric() == 32.0 / 12.0
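For readers who want to try the metric outside a test class, here is a minimal standalone sketch of the same API (it assumes AllenNLP is installed; note that get_metric() returns a plain float in older releases and a {"mae": ...} dict in 1.x and later):

import torch
from allennlp.training.metrics import MeanAbsoluteError

mae = MeanAbsoluteError()
predictions = torch.tensor([[1.0, 1.5], [2.0, 3.0]])
gold_labels = torch.tensor([[0.0, 1.0], [2.0, 2.0]])

# Each call accumulates the total absolute error and the element count.
mae(predictions, gold_labels)

# An optional mask excludes positions from the running totals.
mask = torch.tensor([[True, False], [True, True]])
mae(predictions, gold_labels, mask)

# get_metric() reports the running mean; reset=True clears the accumulated totals.
print(mae.get_metric(reset=True))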
Example #2
    def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        encoder: Seq2SeqEncoder,
        qdep_henc_rnn: Seq2SeqEncoder,
        senc_self_attn: GatedMultifactorSelfAttnEnc,
        attnpool: AttnPooling,
        output_ffl: FeedForward,
        initializer: InitializerApplicator,
        dropout: float = 0.3,
    ) -> None:
        super().__init__(vocab)
        self._text_field_embedder = text_field_embedder

        self._qdep_henc_rnn = qdep_henc_rnn
        self._senc_self_attn = senc_self_attn

        self._variational_dropout = InputVariationalDropout(dropout)
        self._attn_pool = attnpool
        self._output_ffl = output_ffl

        self._num_labels = vocab.get_vocab_size(namespace="labels")

        self._accuracy = CategoricalAccuracy()
        self._mae = MeanAbsoluteError()
        self._loss = torch.nn.MSELoss()
        self._softmax = torch.nn.Softmax(dim=1)
        initializer(self)
Example #3
    def test_mean_absolute_error_computation(self):
        mae = MeanAbsoluteError()
        predictions = torch.Tensor([[1.0, 1.5, 1.0],
                                    [2.0, 3.0, 3.5],
                                    [4.0, 5.0, 5.5],
                                    [6.0, 7.0, 7.5]])
        targets = torch.Tensor([[0.0, 1.0, 0.0],
                                [2.0, 2.0, 0.0],
                                [4.0, 5.0, 0.0],
                                [7.0, 7.0, 0.0]])
        mae(predictions, targets)
        assert mae.get_metric() == 21.0 / 12.0

        mask = torch.Tensor([[1.0, 1.0, 0.0],
                             [1.0, 1.0, 0.0],
                             [1.0, 1.0, 0.0],
                             [1.0, 1.0, 0.0]])
        mae(predictions, targets, mask)
        assert mae.get_metric() == (21.0 + 3.5) / (12.0 + 8.0)

        new_targets = torch.Tensor([[2.0, 2.0, 0.0],
                                    [0.0, 1.0, 0.0],
                                    [7.0, 7.0, 0.0],
                                    [4.0, 5.0, 0.0]])
        mae(predictions, new_targets)
        assert mae.get_metric() == (21.0 + 3.5 + 32.0) / (12.0 + 8.0 + 12.0)

        mae.reset()
        mae(predictions, new_targets)
        assert mae.get_metric() == 32.0 / 12.0
Example #4
def multiple_runs(
    global_rank: int,
    world_size: int,
    gpu_id: Union[int, torch.device],
    metric: MeanAbsoluteError,
    metric_kwargs: Dict[str, List[Any]],
    desired_values: Dict[str, Any],
    exact: Union[bool, Tuple[float, float]] = True,
):

    kwargs = {}
    # Use the arguments meant for the process with rank `global_rank`.
    for argname in metric_kwargs:
        kwargs[argname] = metric_kwargs[argname][global_rank]

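    # Accumulating the same batch repeatedly does not change the running mean,
    # so the aggregated value still matches `desired_values` after 200 updates.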
    for i in range(200):
        metric(**kwargs)

    assert desired_values["mae"] == metric.get_metric()["mae"]
Example #5
    def test_multiple_distributed_runs(self):
        predictions = [
            torch.tensor([[1.0, 1.5, 1.0], [2.0, 3.0, 3.5]]),
            torch.tensor([[4.0, 5.0, 5.5], [6.0, 7.0, 7.5]]),
        ]
        targets = [
            torch.tensor([[0.0, 1.0, 0.0], [2.0, 2.0, 0.0]]),
            torch.tensor([[4.0, 5.0, 0.0], [7.0, 7.0, 0.0]]),
        ]
        metric_kwargs = {"predictions": predictions, "gold_labels": targets}
        desired_values = {"mae": 21.0 / 12.0}
        run_distributed_test(
            [-1, -1],
            multiple_runs,
            MeanAbsoluteError(),
            metric_kwargs,
            desired_values,
            exact=True,
        )
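The two-rank setup above is just the single-process example from #1 split in half: rank 0 gets the first two rows, rank 1 the last two. A quick sketch in plain torch (no distributed setup required) shows why the aggregated value in desired_values is still 21/12 once both workers' absolute errors and element counts are combined, which is what the metric's distributed reduction does internally:

import torch

shard_predictions = [
    torch.tensor([[1.0, 1.5, 1.0], [2.0, 3.0, 3.5]]),  # rank 0
    torch.tensor([[4.0, 5.0, 5.5], [6.0, 7.0, 7.5]]),  # rank 1
]
shard_targets = [
    torch.tensor([[0.0, 1.0, 0.0], [2.0, 2.0, 0.0]]),
    torch.tensor([[4.0, 5.0, 0.0], [7.0, 7.0, 0.0]]),
]

# Sum absolute errors and element counts across shards before dividing.
total_error = sum((p - t).abs().sum().item() for p, t in zip(shard_predictions, shard_targets))
total_count = sum(p.numel() for p in shard_predictions)
assert total_error / total_count == 21.0 / 12.0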
Example #6
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 title_encoder: Seq2VecEncoder,
                 text_encoder: Seq2VecEncoder,
                 regressor_feedforward: FeedForward,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.title_encoder = title_encoder
        self.text_encoder = text_encoder
        self.regressor_feedforward = regressor_feedforward

        if text_field_embedder.get_output_dim() != title_encoder.get_input_dim():
            raise ConfigurationError(f"The output dimension of the text_field_"
                                     f"embedder must match the input dimension"
                                     f" of the title_encoder. Found "
                                     f"{text_field_embedder.get_output_dim()} "
                                     f"and {title_encoder.get_input_dim()}, "
                                     f"respectively.")

        if text_field_embedder.get_output_dim() != text_encoder.get_input_dim():
            raise ConfigurationError(f"The output dimension of the text_field_"
                                     f"embedder must match the input dimension"
                                     f" of the text_encoder. Found "
                                     f"{text_field_embedder.get_output_dim()} "
                                     f"and {text_encoder.get_input_dim()}, "
                                     f"respectively.")

        self.metrics = {
            "MAE": MeanAbsoluteError(),
        }
        self.loss = torch.nn.BCEWithLogitsLoss()
        initializer(self)
Example #7
    def __init__(self,
                 vocab: Vocabulary,
                 token_representation_dim: int,
                 encoder: Optional[Seq2SeqEncoder] = None,
                 decoder: Optional[Union[FeedForward, str]] = None,
                 contextualizer: Optional[Contextualizer] = None,
                 pretrained_file: Optional[str] = None,
                 transfer_contextualizer_from_pretrained_file: bool = False,
                 transfer_encoder_from_pretrained_file: bool = False,
                 freeze_encoder: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SelectiveRegressor, self).__init__(vocab, regularizer)

        self._token_representation_dim = token_representation_dim
        self._contextualizer = contextualizer
        if encoder is None:
            encoder = PassThroughEncoder(
                input_dim=self._token_representation_dim)
        self._encoder = encoder

        # Load the contextualizer and encoder weights from the
        # pretrained_file if applicable
        if pretrained_file:
            archive = None
            if self._contextualizer and transfer_contextualizer_from_pretrained_file:
                logger.info("Attempting to load contextualizer weights from "
                            "pretrained_file at {}".format(pretrained_file))
                archive = load_archive(cached_path(pretrained_file))
                contextualizer_state = archive.model._contextualizer.state_dict()
                contextualizer_layer_num = self._contextualizer._layer_num
                self._contextualizer.load_state_dict(contextualizer_state)
                if contextualizer_layer_num is not None:
                    logger.info("Setting layer num to {}".format(
                        contextualizer_layer_num))
                    self._contextualizer.set_layer_num(
                        contextualizer_layer_num)
                else:
                    self._contextualizer.reset_layer_num()
                logger.info("Successfully loaded contextualizer weights!")
            if transfer_encoder_from_pretrained_file:
                logger.info("Attempting to load encoder weights from "
                            "pretrained_file at {}".format(pretrained_file))
                if archive is None:
                    archive = load_archive(cached_path(pretrained_file))
                encoder_state = archive.model._encoder.state_dict()
                self._encoder.load_state_dict(encoder_state)
                logger.info("Successfully loaded encoder weights!")

        self._freeze_encoder = freeze_encoder
        for parameter in self._encoder.parameters():
            # If freeze is true, requires_grad should be false and vice versa.
            parameter.requires_grad_(not self._freeze_encoder)

        if decoder is None or decoder == "linear":
            # Create the default decoder (a single linear layer) if it is not provided.
            decoder = FeedForward.from_params(
                Params({
                    "input_dim": self._encoder.get_output_dim(),
                    "num_layers": 1,
                    "hidden_dims": 1,
                    "activations": "linear"
                }))
            logger.info("No decoder provided to model, using default "
                        "decoder: {}".format(decoder))
        elif decoder == "mlp":
            # Create the MLP decoder
            decoder = FeedForward.from_params(
                Params({
                    "input_dim": self._encoder.get_output_dim(),
                    "num_layers": 2,
                    "hidden_dims": [1024, 1],
                    "activations": ["relu", "linear"]
                }))
            logger.info("Using MLP decoder: {}".format(decoder))
        self._decoder = decoder

        check_dimensions_match(self._token_representation_dim,
                               self._encoder.get_input_dim(),
                               "token representation dim", "encoder input dim")
        check_dimensions_match(self._encoder.get_output_dim(),
                               self._decoder.get_input_dim(),
                               "encoder output dim", "decoder input dim")
        check_dimensions_match(self._decoder.get_output_dim(), 1,
                               "decoder output dim",
                               "1, since we're predicting a real value")
        # SmoothL1Loss as described in "Neural Models of Factuality" (NAACL 2018)
        self.loss = torch.nn.SmoothL1Loss(reduction="none")
        self.metrics = {
            "mae": MeanAbsoluteError(),
            "pearson_r": PearsonCorrelation()
        }

        # Whether to run in error analysis mode or not, see commands.error_analysis
        self.error_analysis = False
        logger.info("Applying initializer...")
        initializer(self)
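Example #7 stops at the constructor; in a typical AllenNLP model the metrics dict built here is updated inside forward() and reported from get_metrics(). The following is a hypothetical sketch of those two pieces (the helper name _update_metrics is invented for illustration, typing.Dict is assumed to be imported, and the isinstance check covers versions where MeanAbsoluteError.get_metric() returns a dict rather than a float):

    def _update_metrics(self, predictions: torch.Tensor, gold_labels: torch.Tensor) -> None:
        # Hypothetical helper: called from forward() once real-valued predictions exist.
        for metric in self.metrics.values():
            metric(predictions, gold_labels)

    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        results: Dict[str, float] = {}
        for name, metric in self.metrics.items():
            value = metric.get_metric(reset)
            if isinstance(value, dict):
                # MeanAbsoluteError returns e.g. {"mae": ...} in newer AllenNLP
                # releases; take the single value out of the dict.
                value = next(iter(value.values()))
            results[name] = value
        return results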