Example #1
    def decode(
        self,
        input_vector: torch.Tensor,
        target_scale: torch.Tensor,
        decoder_lengths: torch.Tensor,
        hidden_state: HiddenState,
        n_samples: int = None,
    ) -> Tuple[torch.Tensor, bool]:
        """
        Decode hidden state of RNN into prediction. If n_samples is given,
        decode not by using actual values but by iteratively
        sampling new targets from past predictions.
        """
        if n_samples is None:
            output, _ = self.decode_all(input_vector,
                                        hidden_state,
                                        lengths=decoder_lengths)
            output_transformation = True
        else:
            # run in eval, i.e. simulation mode
            target_pos = self.target_positions
            lagged_target_positions = self.lagged_target_positions
            # repeat for n_samples
            input_vector = input_vector.repeat_interleave(n_samples, 0)
            hidden_state = self.rnn.repeat_interleave(hidden_state, n_samples)
            target_scale = apply_to_list(
                target_scale, lambda x: x.repeat_interleave(n_samples, 0))

            # define function to run at every decoding step
            def decode_one(
                idx,
                lagged_targets,
                hidden_state,
            ):
                x = input_vector[:, [idx]]
                x[:, 0, target_pos] = lagged_targets[-1]
                for lag, lag_positions in lagged_target_positions.items():
                    if idx > lag:
                        x[:, 0, lag_positions] = lagged_targets[-lag]
                prediction, hidden_state = self.decode_all(x, hidden_state)
                prediction = apply_to_list(
                    prediction, lambda x: x[:, 0])  # select first time step
                return prediction, hidden_state

            # make predictions which are fed into next step
            output = self.decode_autoregressive(
                decode_one,
                first_target=input_vector[:, 0, target_pos],
                first_hidden_state=hidden_state,
                target_scale=target_scale,
                n_decoder_steps=input_vector.size(1),
            )
            # reshape predictions for n_samples:
            # from n_samples * batch_size x time steps to batch_size x time steps x n_samples
            output = apply_to_list(
                output, lambda x: x.reshape(-1, n_samples, input_vector.size(1)
                                            ).permute(0, 2, 1))
            output_transformation = None
        return output, output_transformation
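
A minimal standalone sketch (shapes chosen arbitrarily) of the reshape/permute at the end of the sampling branch: because the inputs were expanded with repeat_interleave(n_samples, 0), the sampled predictions come back stacked as (batch_size * n_samples) x time_steps, and reshape followed by permute regroups them into batch_size x time_steps x n_samples.

    import torch

    batch_size, time_steps, n_samples = 4, 6, 3
    flat = torch.randn(batch_size * n_samples, time_steps)   # stand-in for the sampled predictions
    regrouped = flat.reshape(-1, n_samples, time_steps).permute(0, 2, 1)
    assert regrouped.shape == (batch_size, time_steps, n_samples)
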
Example #2
    def log_metrics(
        self,
        x: Dict[str, torch.Tensor],
        y: torch.Tensor,
        out: Dict[str, torch.Tensor],
    ) -> None:

        if out.get("output_transformation", True) is not None:
            # use distribution properties to create point prediction
            out = copy(out)  # copy to avoid side-effects but do not deep copy to re-use references
            y_hat_detached = apply_to_list(out["prediction"], lambda x: x.detach())
            y_hat_point_detached = apply_to_list(
                self.loss.map_x_to_distribution(y_hat_detached), lambda x: x.mean.unsqueeze(-1)
            )
            out["prediction"] = y_hat_point_detached
            out["output_transformation"] = None
        super().log_metrics(x, y, out)
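
The point-prediction step can be illustrated in isolation. The sketch below assumes a Normal distribution as a stand-in for whatever self.loss.map_x_to_distribution returns (the parameter layout is an assumption), and shows how taking the distribution mean and unsqueezing the last dimension yields the point forecast the metrics expect.

    import torch
    from torch.distributions import Normal

    params = torch.randn(8, 24, 2)                                 # assumed layout: (batch, time, [loc, log_scale])
    dist = Normal(loc=params[..., 0], scale=params[..., 1].exp())  # stand-in for self.loss.map_x_to_distribution(...)
    point_prediction = dist.mean.unsqueeze(-1)                     # (batch, time, 1), like y_hat_point_detached
    assert point_prediction.shape == (8, 24, 1)
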
Example #3
 def log_prediction(self, x, out, batch_idx) -> None:
     if (
         out.get("output_transformation", True) is not None
         and (batch_idx % self.log_interval == 0 or self.log_interval < 1.0)
         and self.log_interval > 0
     ):
         out = copy(out)  # copy to avoid side-effects but do not deep copy to re-use references
         # sample from distribution to create valid prediction
         y_hat_detached = apply_to_list(out["prediction"], lambda x: x.detach())
         if self.hparams.n_plotting_samples is None:
             y_hat_samples = apply_to_list(
                 self.loss.map_x_to_distribution(y_hat_detached), lambda x: x.mean.unsqueeze(-1)
             )
         else:
             y_hat_samples = self.loss.sample(y_hat_detached, self.hparams.n_plotting_samples)
         out["prediction"] = y_hat_samples
         out["output_transformation"] = None
     super().log_prediction(x, out, batch_idx=batch_idx)
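
For the plotting path, samples are drawn instead of taking the mean. A hedged sketch of what self.loss.sample conceptually produces, again assuming a Normal distribution as a stand-in:

    import torch
    from torch.distributions import Normal

    n_plotting_samples = 100
    dist = Normal(loc=torch.zeros(8, 24), scale=torch.ones(8, 24))  # stand-in distribution
    samples = dist.sample((n_plotting_samples,)).permute(1, 2, 0)   # (batch, time, n_samples)
    assert samples.shape == (8, 24, n_plotting_samples)
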
Example #4
    def log_metrics(
        self,
        x: Dict[str, torch.Tensor],
        y: torch.Tensor,
        out: Dict[str, torch.Tensor],
    ) -> None:

        if out["prediction_type"] == "parameters":
            # use distribution properties to create point prediction
            out = copy(out)  # copy to avoid side-effects but do not deep copy to re-use references
            y_hat_detached = apply_to_list(out["prediction"],
                                           lambda x: x.detach())
            y_hat_point_detached = apply_to_list(
                self.loss.map_x_to_distribution(y_hat_detached),
                lambda x: x.mean.unsqueeze(-1))
            out["prediction"] = y_hat_point_detached
            out["prediction_type"] = "samples"
        super().log_metrics(x, y, out)
Example #5
 def decode_one(
     idx,
     lagged_targets,
     hidden_state,
 ):
     x = input_vector[:, [idx]]
     x[:, 0, target_pos] = lagged_targets[-1]
     for lag, lag_positions in lagged_target_positions.items():
         if idx > lag:
             x[:, 0, lag_positions] = lagged_targets[-lag]
     prediction, hidden_state = self.decode_all(x, hidden_state)
     prediction = apply_to_list(prediction, lambda x: x[:, 0])  # select first time step
     return prediction, hidden_state
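
The lag indexing in decode_one is easiest to see with concrete values. The snippet below is purely illustrative (the lags and feature positions are made up): lagged_targets grows by one entry per decoding step, so lagged_targets[-1] is the latest prediction and lagged_targets[-lag] is the value from lag steps earlier, usable only once idx > lag.

    lagged_targets = ["t0", "t1", "t2", "t3", "t4"]   # predictions collected so far, one per decoded step
    idx = 5                                           # current decoding step
    lagged_target_positions = {2: [7], 4: [9]}        # hypothetical {lag: feature positions}
    for lag, lag_positions in lagged_target_positions.items():
        if idx > lag:
            print(f"fill positions {lag_positions} with lagged_targets[-{lag}] = {lagged_targets[-lag]}")
    # fill positions [7] with lagged_targets[-2] = t3
    # fill positions [9] with lagged_targets[-4] = t1
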
Example #6
    def decode(
        self,
        input_vector: torch.Tensor,
        target_scale: torch.Tensor,
        decoder_lengths: torch.Tensor,
        hidden_state: HiddenState,
        n_samples: int = None,
    ) -> Tuple[torch.Tensor, str]:
        """
        Decode hidden state of RNN into prediction. If n_samples is given,
        decode not by using actual values but by iteratively
        sampling new targets from past predictions.
        """
        if n_samples is None:
            input_vector = rnn.pack_padded_sequence(input_vector,
                                                    decoder_lengths.cpu(),
                                                    enforce_sorted=False,
                                                    batch_first=True)
            decoder_output, _ = self.rnn(
                input_vector,
                hidden_state,
            )
            decoder_output, _ = rnn.pad_packed_sequence(decoder_output,
                                                        batch_first=True)
            if isinstance(self.hparams.target, str):
                output = self.distribution_projector(decoder_output)
            else:
                output = [
                    projector(decoder_output)
                    for projector in self.distribution_projector
                ]
            output_type = "parameters"

        else:
            # run in eval, i.e. simulation mode
            target_pos = self.target_positions
            # repeat for n_samples
            input_vector = input_vector.repeat_interleave(n_samples, 0)
            hidden_state = self.rnn.repeat_interleave(hidden_state, n_samples)
            target_scale = apply_to_list(
                target_scale, lambda x: x.repeat_interleave(n_samples, 0))

            # make predictions which are fed into next step
            input_target = input_vector[:, 0, target_pos]
            output = []
            for idx in range(input_vector.size(1)):
                x = input_vector[:, [idx]]
                x[:, 0, target_pos] = input_target
                decoder_output, hidden_state = self.rnn(x, hidden_state)
                if isinstance(self.hparams.target, str):  # single target
                    normalized_prediction_parameters = self.distribution_projector(
                        decoder_output)
                else:
                    normalized_prediction_parameters = [
                        projector(decoder_output)
                        for projector in self.distribution_projector
                    ]
                # transform into real space
                prediction_parameters = self.transform_output(
                    dict(
                        prediction=normalized_prediction_parameters,
                        target_scale=target_scale,
                        prediction_type="parameters",
                    ))
                # draw a single sample from the distribution and select it
                prediction = apply_to_list(
                    self.loss.sample(prediction_parameters, 1),
                    lambda x: x[..., -1])
                # normalize prediction
                # todo: how to handle lags (-> need list of lags and positions)
                #   -> then, if the prediction length is larger than the lag, start imputing;
                #   before that, let the TimeSeriesDataSet take care of it
                normalized_prediction = self.output_transformer.transform(
                    prediction, target_scale=target_scale)
                if isinstance(normalized_prediction, list):
                    input_target = torch.cat(normalized_prediction, dim=-1)
                else:
                    input_target = normalized_prediction  # set next input target to normalized prediction

                # set output to unnormalized samples, append each target as n_batch_samples x n_random_samples
                output.append(
                    apply_to_list(prediction, lambda x: x.view(-1, n_samples)))
            if isinstance(self.hparams.target, str):
                output = torch.stack(output, dim=1)
            else:
                # for multi-targets
                output = [
                    torch.stack([out[idx] for out in output], dim=1)
                    for idx in range(len(self.target_positions))
                ]
            output_type = "samples"
        return output, output_type
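
How the per-step samples are assembled can again be shown standalone: every decoding step appends a (batch_size, n_samples) tensor, and stacking along dim=1 gives batch_size x time_steps x n_samples (the multi-target branch simply does this once per target).

    import torch

    batch_size, time_steps, n_samples = 4, 6, 3
    output = [torch.randn(batch_size, n_samples) for _ in range(time_steps)]  # one entry per decoding step
    stacked = torch.stack(output, dim=1)
    assert stacked.shape == (batch_size, time_steps, n_samples)
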
Example #7
 def decode_one(idx, target, hidden_state):
     x = input_vector[:, [idx]]
     x[:, 0, target_pos] = target
     prediction, hidden_state = self.decode_all(x, hidden_state)
     prediction = apply_to_list(prediction, lambda x: x[:, 0])  # select first time step
     return prediction, hidden_state
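
Examples #5 and #7 are callbacks for an autoregressive decoding loop. The loop itself follows roughly the pattern below, a simplified sketch with illustrative names matching Example #7's signature, not the library's actual decode_autoregressive (which also rescales predictions before feeding them back):

    def autoregressive_decode(decode_one, first_target, first_hidden_state, n_decoder_steps):
        # simplified sketch: feed each step's prediction back in as the next step's target
        target, hidden_state = first_target, first_hidden_state
        predictions = []
        for idx in range(n_decoder_steps):
            prediction, hidden_state = decode_one(idx, target, hidden_state)
            predictions.append(prediction)
            target = prediction
        return predictions
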