Example #1
    def __init__(
        self,
        input_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        control_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: np.dtype = np.float32,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype

        self.lags_seq = lags_seq

        self.distr_output = distr_output
        self.control_output = control_output

        rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[self.cell_type]
        self.rnn = rnn(
            input_size=input_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )

        self.target_shape = distr_output.event_shape

        self.proj_distr_args = distr_output.get_args_proj(num_cells + 1)
        self.proj_control_args = control_output.get_args_proj(num_cells)

        self.embedder = FeatureEmbedder(cardinalities=cardinality,
                                        embedding_dims=embedding_dimension)

        if scaling:
            self.scaler = MeanScaler(keepdim=True)
            self.control_scaler = NOPScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
            self.control_scaler = NOPScaler(keepdim=True)
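Note: the constructor above only builds the scaler; the `scaling` flag just decides between MeanScaler and NOPScaler, and the chosen scaler is later called on the raw target together with an observed-values mask. Below is a minimal, self-contained sketch of that switch and call, assuming the (data, observed_indicator) -> (scaled_data, scale) interface exercised by the scaler tests further down; the import path and tensor names are assumptions, not taken from the surrounding code.

import torch
# Assumed import path -- adjust to wherever MeanScaler / NOPScaler live in this codebase.
from pts.modules import MeanScaler, NOPScaler

scaling = True
scaler = MeanScaler(keepdim=True) if scaling else NOPScaler(keepdim=True)

# Illustrative shapes: (batch, context_length, target_dim)
past_target = torch.rand(4, 24, 1)
past_observed = torch.ones_like(past_target)  # 1.0 where the target was actually observed

scaled_target, scale = scaler(past_target, past_observed)
# With keepdim=True the scale keeps a singleton time axis (here (4, 1, 1)),
# so it broadcasts when de-scaling the projected distribution parameters later.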
Example #2
    def __init__(
        self,
        num_hidden_dimensions: List[int],
        prediction_length: int,
        context_length: int,
        batch_normalization: bool,
        mean_scaling: bool,
        distr_output: DistributionOutput,
    ) -> None:
        super().__init__()

        self.num_hidden_dimensions = num_hidden_dimensions
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.distr_output = distr_output

        modules = []
        dims = self.num_hidden_dimensions

        for i, units in enumerate(dims[:-1]):
            if i == 0:
                input_size = context_length
            else:
                input_size = dims[i - 1]
            modules += [nn.Linear(input_size, units), nn.ReLU()]
            if self.batch_normalization:
                modules.append(nn.BatchNorm1d(units))
        if len(dims) == 1:
            modules.append(
                nn.Linear(context_length, dims[-1] * prediction_length))
        else:
            modules.append(nn.Linear(dims[-2], dims[-1] * prediction_length))
        modules.append(
            LambdaLayer(
                lambda o: torch.reshape(o, (-1, prediction_length, dims[-1]))))

        self.mlp = nn.Sequential(*modules)
        self.distr_args_proj = self.distr_output.get_args_proj(dims[-1])
        self.criterion = nn.SmoothL1Loss(reduction="none")
        self.scaler = MeanScaler() if mean_scaling else NOPScaler()
Example #3
    def __init__(
        self,
        input_size: int,
        d_model: int,
        num_heads: int,
        act_type: str,
        dropout_rate: float,
        dim_feedforward_scale: int,
        num_encoder_layers: int,
        num_decoder_layers: int,
        history_length: int,
        context_length: int,
        prediction_length: int,
        lags_seq: List[int],
        target_dim: int,
        conditioning_length: int,
        flow_type: str,
        n_blocks: int,
        hidden_size: int,
        n_hidden: int,
        dequantize: bool,
        cardinality: List[int] = [1],
        embedding_dimension: int = 1,
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.target_dim = target_dim
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.history_length = history_length
        self.scaling = scaling

        assert len(
            set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()
        self.lags_seq = lags_seq

        self.encoder_input = nn.Linear(input_size, d_model)
        self.decoder_input = nn.Linear(input_size, d_model)

        # [B, T, d_model] where d_model / num_heads is int
        self.transformer = nn.Transformer(
            d_model=d_model,
            nhead=num_heads,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward_scale * d_model,
            dropout=dropout_rate,
            activation=act_type,
        )

        flow_cls = {
            "RealNVP": RealNVP,
            "MAF": MAF,
        }[flow_type]
        self.flow = flow_cls(
            input_size=target_dim,
            n_blocks=n_blocks,
            n_hidden=n_hidden,
            hidden_size=hidden_size,
            cond_label_size=conditioning_length,
        )
        self.dequantize = dequantize

        self.distr_output = FlowOutput(self.flow,
                                       input_size=target_dim,
                                       cond_size=conditioning_length)

        self.proj_dist_args = self.distr_output.get_args_proj(d_model)

        self.embed_dim = 1
        self.embed = nn.Embedding(num_embeddings=self.target_dim,
                                  embedding_dim=self.embed_dim)

        if self.scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)

        # mask
        self.register_buffer(
            "tgt_mask",
            self.transformer.generate_square_subsequent_mask(
                prediction_length),
        )
Example #4
    def __init__(
        self,
        input_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        lags_seq: List[int],
        target_dim: int,
        conditioning_length: int,
        cardinality: List[int] = [1],
        embedding_dimension: int = 1,
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.target_dim = target_dim
        self.scaling = scaling
        self.target_dim_sample = target_dim
        self.conditioning_length = conditioning_length

        assert len(
            set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()

        self.lags_seq = lags_seq

        self.distr_output = distr_output

        self.target_dim = target_dim

        rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[self.cell_type]
        self.rnn = rnn(
            input_size=input_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )

        self.proj_dist_args = distr_output.get_args_proj(num_cells)

        self.embed_dim = 1
        self.embed = nn.Embedding(num_embeddings=self.target_dim,
                                  embedding_dim=self.embed_dim)

        if scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
Example #5
    def __init__(
        self,
        input_size: int,
        decoder_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        short_cycle: int,
        history_length: int,
        context_length: int,    # should equal the length of the long period
        prediction_length: int,     # should be an integer multiple of the small period
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: np.dtype = np.float32,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.short_cycle = short_cycle
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        # self.lags_seq = lags_seq + [0]   # 0 is the current value
        self.lags_seq = [l - 1 for l in lags_seq]   # shift lags by one to distinguish from DeepAR

        self.distr_output = distr_output
        rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[self.cell_type]

        self.long_cell = nn.GRUCell(input_size=input_size, hidden_size=num_cells)
        self.encoder = nn.ModuleList(
            rnn(
                input_size=num_cells,
                hidden_size=num_cells,
                num_layers=num_layers,
                dropout=dropout_rate,
                batch_first=True,
            )
            for _ in range(self.context_length // self.short_cycle)
        )
        # self.encoder = rnn(
        #         input_size=num_cells,
        #         hidden_size=num_cells,
        #         num_layers=num_layers,
        #         dropout=dropout_rate,
        #         batch_first=True,
        #     )
        self.decoder = rnn(
            input_size=decoder_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )

        self.decoder_dnn = nn.ModuleList(nn.Sequential(
            nn.Linear(decoder_size, 1),
            # nn.ReLU()
        ) for _ in range(self.prediction_length))

        self.out = nn.Linear(num_cells, 1)
        self.criterion = nn.MSELoss()    # L2 loss (L1 is an alternative)

        self.target_shape = distr_output.event_shape

        self.proj_distr_args = distr_output.get_args_proj(num_cells)

        self.embedder = FeatureEmbedder(
            cardinalities=cardinality, embedding_dims=embedding_dimension
        )

        if scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
Example #6
    def __init__(
        self,
        input_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        dropout_rate: float,
        lags_seq: List[int],
        target_dim: int,
        conditioning_length: int,
        flow_type: str,
        n_blocks: int,
        hidden_size: int,
        n_hidden: int,
        dequantize: bool,
        cardinality: List[int] = [1],
        embedding_dimension: int = 1,
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.target_dim = target_dim
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.history_length = history_length
        self.scaling = scaling

        assert len(
            set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()
        self.lags_seq = lags_seq

        self.cell_type = cell_type
        rnn_cls = {"LSTM": nn.LSTM, "GRU": nn.GRU}[cell_type]
        self.rnn = rnn_cls(
            input_size=input_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )

        flow_cls = {
            "RealNVP": RealNVP,
            "MAF": MAF,
        }[flow_type]
        self.flow = flow_cls(
            input_size=target_dim,
            n_blocks=n_blocks,
            n_hidden=n_hidden,
            hidden_size=hidden_size,
            cond_label_size=conditioning_length,
        )
        self.dequantize = dequantize

        self.distr_output = FlowOutput(self.flow,
                                       input_size=target_dim,
                                       cond_size=conditioning_length)

        self.proj_dist_args = self.distr_output.get_args_proj(num_cells)

        self.embed_dim = 1
        self.embed = nn.Embedding(num_embeddings=self.target_dim,
                                  embedding_dim=self.embed_dim)

        if self.scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
Example #7
def test_nopscaler(target, observed):
    s = NOPScaler()
    target_scaled, scale = s(target, observed)

    assert torch.norm(target - target_scaled) == 0
    assert torch.norm(torch.ones_like(target).mean(dim=1) - scale) == 0
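For reference, a minimal sketch of a no-op scaler that satisfies exactly what this test asserts: the data comes back unchanged and the reported scale is a tensor of ones reduced over the time axis. The class name is hypothetical; this illustrates the interface, it is not the library's implementation.

import torch
import torch.nn as nn

class TinyNOPScaler(nn.Module):
    """Hypothetical stand-in with the same (data, observed) -> (data, scale) contract."""

    def __init__(self, keepdim: bool = False):
        super().__init__()
        self.keepdim = keepdim

    def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor):
        # Pass the data through untouched; the scale is ones reduced over time (dim=1).
        scale = torch.ones_like(data).mean(dim=1, keepdim=self.keepdim)
        return data, scale

target = torch.randn(10, 20, 30)   # (batch, time, features)
observed = torch.ones_like(target)
scaled, scale = TinyNOPScaler()(target, observed)
assert torch.norm(target - scaled) == 0
assert torch.equal(scale, torch.ones(10, 30))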
Example #8
     1e-10 * torch.ones((5, )),
 ),
 (
     MeanScaler(minimum_scale=1e-6),
     torch.randn((5, 30, 1)),
     torch.zeros((5, 30, 1)),
     1e-6 * torch.ones((5, 1)),
 ),
 (
     MeanScaler(minimum_scale=1e-12),
     torch.randn((5, 30, 3)),
     torch.zeros((5, 30, 3)),
     1e-12 * torch.ones((5, 3)),
 ),
 (
     NOPScaler(),
     torch.randn((10, 20, 30)),
     torch.randn((10, 20, 30)) > 0,
     torch.ones((10, 30)),
 ),
 (
     NOPScaler(),
     torch.randn((10, 20, 30)),
     torch.ones((10, 20, 30)),
     torch.ones((10, 30)),
 ),
 (
     NOPScaler(),
     torch.randn((10, 20, 30)),
     torch.zeros((10, 20, 30)),
     torch.ones((10, 30)),
Example #9
    def __init__(
        self,
        input_size: int,
        d_model: int,
        num_heads: int,
        act_type: str,
        dropout_rate: float,
        dim_feedforward_scale: int,
        num_encoder_layers: int,
        num_decoder_layers: int,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output

        assert len(set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()

        self.lags_seq = lags_seq

        self.target_shape = distr_output.event_shape

        # [B, T, input_size] -> [B, T, d_model]
        self.encoder_input = nn.Linear(input_size, d_model)
        self.decoder_input = nn.Linear(input_size, d_model)

        # [B, T, d_model] where d_model / num_heads is int
        self.transformer = nn.Transformer(
            d_model=d_model,
            nhead=num_heads,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward_scale * d_model,
            dropout=dropout_rate,
            activation=act_type,
        )

        self.proj_dist_args = distr_output.get_args_proj(d_model)

        self.embedder = FeatureEmbedder(
            cardinalities=cardinality, embedding_dims=embedding_dimension,
        )

        if scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)

        # mask
        self.register_buffer(
            "tgt_mask", self.transformer.generate_square_subsequent_mask(prediction_length)
        )
Example #10
    def __init__(
        self,
        num_series: int,
        channels: int,
        kernel_size: int,
        rnn_cell_type: str,
        rnn_num_cells: int,
        skip_rnn_cell_type: str,
        skip_rnn_num_cells: int,
        skip_size: int,
        ar_window: int,
        context_length: int,
        horizon: Optional[int],
        prediction_length: Optional[int],
        dropout_rate: float,
        output_activation: Optional[str],
        scaling: bool,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)

        self.num_series = num_series
        self.channels = channels
        assert (
            channels % skip_size == 0
        ), "number of conv1d `channels` must be divisible by the `skip_size`"
        self.skip_size = skip_size
        assert ar_window > 0, "auto-regressive window must be a positive integer"
        self.ar_window = ar_window
        assert (horizon is None) != (
            prediction_length is None
        ), "Exactly one of `horizon` and `prediction_length` must be set"
        assert horizon is None or horizon > 0, "`horizon` must be greater than zero"
        assert (prediction_length is None or prediction_length > 0
                ), "`prediction_length` must be greater than zero"
        self.prediction_length = prediction_length
        self.horizon = horizon
        assert context_length > 0, "`context_length` must be greater than zero"
        self.context_length = context_length
        if output_activation is not None:
            assert output_activation in [
                "sigmoid",
                "tanh",
            ], "`output_activation` must be either 'sigmiod' or 'tanh' "
        self.output_activation = output_activation
        assert rnn_cell_type in [
            "GRU",
            "LSTM",
        ], "`rnn_cell_type` must be either 'GRU' or 'LSTM' "
        assert skip_rnn_cell_type in [
            "GRU",
            "LSTM",
        ], "`skip_rnn_cell_type` must be either 'GRU' or 'LSTM' "

        conv_out = context_length - kernel_size
        self.conv_skip = conv_out // skip_size
        assert self.conv_skip > 0, (
            "conv1d output size must be greater than or equal to `skip_size`\n"
            "Choose a smaller `kernel_size` or bigger `context_length`")

        self.cnn = nn.Conv2d(in_channels=1,
                             out_channels=channels,
                             kernel_size=(num_series, kernel_size))

        self.dropout = nn.Dropout(p=dropout_rate)

        rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[rnn_cell_type]
        self.rnn = rnn(
            input_size=channels,
            hidden_size=rnn_num_cells,
            # dropout=dropout_rate,
        )

        skip_rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[skip_rnn_cell_type]
        self.skip_rnn_num_cells = skip_rnn_num_cells
        self.skip_rnn = skip_rnn(
            input_size=channels,
            hidden_size=skip_rnn_num_cells,
            # dropout=dropout_rate,
        )

        self.fc = nn.Linear(rnn_num_cells + skip_size * skip_rnn_num_cells,
                            num_series)

        if self.horizon:
            self.ar_fc = nn.Linear(ar_window, 1)
        else:
            self.ar_fc = nn.Linear(ar_window, prediction_length)

        if scaling:
            self.scaler = MeanScaler(keepdim=True, time_first=False)
        else:
            self.scaler = NOPScaler(keepdim=True, time_first=False)
Example #11
    def __init__(
        self,
        input_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        dropout_rate: float,
        lags_seq: List[int],
        target_dim: int,
        conditioning_length: int,
        diff_steps: int,
        loss_type: str,
        beta_end: float,
        beta_schedule: str,
        residual_layers: int,
        residual_channels: int,
        dilation_cycle_length: int,
        cardinality: List[int] = [1],
        embedding_dimension: int = 1,
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.target_dim = target_dim
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.history_length = history_length
        self.scaling = scaling

        assert len(
            set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()
        self.lags_seq = lags_seq

        self.cell_type = cell_type
        rnn_cls = {"LSTM": nn.LSTM, "GRU": nn.GRU}[cell_type]
        self.rnn = rnn_cls(
            input_size=input_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )

        self.denoise_fn = EpsilonTheta(
            target_dim=target_dim,
            cond_length=conditioning_length,
            residual_layers=residual_layers,
            residual_channels=residual_channels,
            dilation_cycle_length=dilation_cycle_length,
        )

        self.diffusion = GaussianDiffusion(
            self.denoise_fn,
            input_size=target_dim,
            diff_steps=diff_steps,
            loss_type=loss_type,
            beta_end=beta_end,
            beta_schedule=beta_schedule,
        )

        self.distr_output = DiffusionOutput(self.diffusion,
                                            input_size=target_dim,
                                            cond_size=conditioning_length)

        self.proj_dist_args = self.distr_output.get_args_proj(num_cells)

        self.embed_dim = 1
        self.embed = nn.Embedding(num_embeddings=self.target_dim,
                                  embedding_dim=self.embed_dim)

        if self.scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
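To round things off, a similarly hedged sketch of a mean scaler that is consistent with the MeanScaler(minimum_scale=...) cases listed in Example #8: the scale is the mean absolute value over the observed time steps and falls back to minimum_scale when nothing is observed. The class name and internals are illustrative, not the library's exact implementation.

import torch
import torch.nn as nn

class TinyMeanScaler(nn.Module):
    """Hypothetical simplified mean scaler with the (data, observed) -> (scaled, scale) contract."""

    def __init__(self, minimum_scale: float = 1e-10, keepdim: bool = False):
        super().__init__()
        self.minimum_scale = minimum_scale
        self.keepdim = keepdim

    def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor):
        # Per-series mean absolute value over the observed time steps (dim=1).
        num_observed = observed_indicator.sum(dim=1).clamp(min=1.0)
        scale = (data.abs() * observed_indicator).sum(dim=1) / num_observed
        scale = scale.clamp(min=self.minimum_scale)
        denom = scale.unsqueeze(1)            # broadcast over the time axis
        return data / denom, (denom if self.keepdim else scale)

# Mirrors one of the parametrized cases in Example #8: nothing observed,
# so the scale collapses to minimum_scale for every series.
data = torch.randn(5, 30, 1)
observed = torch.zeros(5, 30, 1)
_, scale = TinyMeanScaler(minimum_scale=1e-6)(data, observed)
assert torch.allclose(scale, 1e-6 * torch.ones(5, 1))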