Code Example #1
File: _network.py Project: nllosse/gluon-ts
    def __init__(
        self,
        num_hidden_dimensions: List[int],
        prediction_length: int,
        context_length: int,
        batch_normalization: bool,
        mean_scaling: bool,
        distr_output: DistributionOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_hidden_dimensions = num_hidden_dimensions
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.distr_output = distr_output

        with self.name_scope():
            self.distr_args_proj = self.distr_output.get_args_proj()
            self.mlp = mx.gluon.nn.HybridSequential()
            dims = self.num_hidden_dimensions
            for units in dims[:-1]:
                self.mlp.add(mx.gluon.nn.Dense(units=units, activation="relu"))
                if self.batch_normalization:
                    self.mlp.add(mx.gluon.nn.BatchNorm())
            self.mlp.add(mx.gluon.nn.Dense(units=prediction_length * dims[-1]))
            self.mlp.add(
                mx.gluon.nn.HybridLambda(lambda F, o: F.reshape(
                    o, (-1, prediction_length, dims[-1]))))
            self.scaler = MeanScaler() if mean_scaling else NOPScaler()
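
To make the final reshape concrete, here is a minimal, self-contained sketch of the same Dense-plus-HybridLambda pattern with plain MXNet and illustrative sizes (no gluon-ts imports): the MLP emits one hidden vector per step of the forecast horizon.

import mxnet as mx

prediction_length, last_dim = 4, 8
mlp = mx.gluon.nn.HybridSequential()
mlp.add(mx.gluon.nn.Dense(units=16, activation="relu"))
mlp.add(mx.gluon.nn.Dense(units=prediction_length * last_dim))
mlp.add(mx.gluon.nn.HybridLambda(
    lambda F, o: F.reshape(o, (-1, prediction_length, last_dim))))
mlp.initialize()
out = mlp(mx.nd.random.normal(shape=(2, 10)))  # batch of 2 context windows
print(out.shape)  # (2, 4, 8): one last_dim-wide vector per horizon step
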
Code Example #2
File: _seq2seq_estimator.py Project: yx1215/gluon-ts
 def __init__(
     self,
     freq: str,
     prediction_length: int,
     cardinality: List[int],
     embedding_dimension: int,
     encoder_mlp_layer: List[int],
     decoder_mlp_layer: List[int],
     decoder_mlp_static_dim: int,
     scaler: Scaler = NOPScaler(),
     context_length: Optional[int] = None,
     quantiles: List[float] = [0.1, 0.5, 0.9],
     trainer: Trainer = Trainer(),
     num_parallel_samples: int = 100,
 ) -> None:
     encoder = MLPEncoder(layer_sizes=encoder_mlp_layer)
     super(MLP2QRForecaster, self).__init__(
         freq=freq,
         prediction_length=prediction_length,
         encoder=encoder,
         cardinality=cardinality,
         embedding_dimension=embedding_dimension,
         decoder_mlp_layer=decoder_mlp_layer,
         decoder_mlp_static_dim=decoder_mlp_static_dim,
         context_length=context_length,
         scaler=scaler,
         quantiles=quantiles,
         trainer=trainer,
         num_parallel_samples=num_parallel_samples,
     )
Code Example #3
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        past_length: int,
        prediction_length: int,
        issm: ISSM,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: int,
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.past_length = past_length
        self.prediction_length = prediction_length
        self.issm = issm
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling

        self.univariate = self.issm.output_dim() == 1

        with self.name_scope():
            self.prior_mean_model = mx.gluon.nn.Dense(
                units=self.issm.latent_dim(), flatten=False)
            self.prior_cov_diag_model = mx.gluon.nn.Dense(
                units=self.issm.latent_dim(),
                activation="sigmoid",  # TODO: put explicit upper bound
                flatten=False,
            )
            self.lstm = mx.gluon.rnn.HybridSequentialRNNCell()
            self.lds_proj = LDSArgsProj(output_dim=self.issm.output_dim())
            for k in range(num_layers):
                cell = mx.gluon.rnn.LSTMCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (mx.gluon.rnn.ZoneoutCell(cell,
                                                 zoneout_states=dropout_rate)
                        if dropout_rate > 0.0 else cell)
                self.lstm.add(cell)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=False)
            else:
                self.scaler = NOPScaler(keepdims=False)
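
The LSTM stack above follows a recurring gluon-ts recipe: a residual connection on every layer except the first, and zoneout whenever the dropout rate is positive. A minimal sketch of just that stack, with illustrative sizes:

import mxnet as mx

num_layers, num_cells, dropout_rate = 3, 8, 0.1
lstm = mx.gluon.rnn.HybridSequentialRNNCell()
for k in range(num_layers):
    cell = mx.gluon.rnn.LSTMCell(hidden_size=num_cells)
    # residual connections everywhere except the first layer
    cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
    if dropout_rate > 0.0:
        cell = mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
    lstm.add(cell)
lstm.initialize()
inputs = mx.nd.random.normal(shape=(2, 6, num_cells))  # (batch, time, features)
outputs, states = lstm.unroll(length=6, inputs=inputs, merge_outputs=True)
print(outputs.shape)  # (2, 6, 8)
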
Code Example #4
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output

        assert len(
            set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()

        self.lags_seq = lags_seq

        self.target_shape = distr_output.event_shape

        with self.name_scope():
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )

            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
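
The FeatureEmbedder used here maps each categorical feature through its own embedding of width embedding_dimension. A rough stand-in built from plain mx.gluon.nn.Embedding layers (illustrative cardinalities; not the gluon-ts class itself):

import mxnet as mx

cardinality, embedding_dimension = [4, 10], 3
embedders = [mx.gluon.nn.Embedding(input_dim=c, output_dim=embedding_dimension)
             for c in cardinality]
for e in embedders:
    e.initialize()
cats = mx.nd.array([[1, 7], [3, 0]])  # (batch, num_cat_features)
embedded = mx.nd.concat(*[e(cats[:, i]) for i, e in enumerate(embedders)], dim=1)
print(embedded.shape)  # (2, 6) = (batch, num_cat_features * embedding_dimension)
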
Code Example #5
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: List[int],
        embedding_dimension: int,
        encoder: Seq2SeqEncoder,
        decoder_mlp_layer: List[int],
        decoder_mlp_static_dim: int,
        scaler: Scaler = NOPScaler(),
        context_length: Optional[int] = None,
        quantiles: Optional[List[float]] = None,
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
    ) -> None:
        assert (prediction_length >
                0), "The value of `prediction_length` should be > 0"
        assert (context_length is None or context_length > 0
                ), "The value of `context_length` should be > 0"
        assert quantiles is None or all(
            0 <= d <= 1 for d in
            quantiles), "Elements of `quantiles` should be >= 0 and <= 1"

        super().__init__(trainer=trainer)

        self.context_length = (context_length if context_length is not None
                               else prediction_length)
        self.prediction_length = prediction_length
        self.freq = freq
        self.quantiles = (quantiles
                          if quantiles is not None else [0.1, 0.5, 0.9])
        self.encoder = encoder
        self.decoder_mlp_layer = decoder_mlp_layer
        self.decoder_mlp_static_dim = decoder_mlp_static_dim
        self.scaler = scaler
        self.embedder = FeatureEmbedder(
            cardinalities=cardinality,
            embedding_dims=[embedding_dimension for _ in cardinality],
        )
        self.num_parallel_samples = num_parallel_samples
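
Note the Optional[List[float]] = None default for quantiles, resolved to [0.1, 0.5, 0.9] in the body; unlike the literal list default in Code Example #2, this avoids handing every call the same mutable list. The idiom in isolation (hypothetical helper name):

from typing import List, Optional

def resolve_quantiles(quantiles: Optional[List[float]] = None) -> List[float]:
    # a None default avoids one shared mutable default list across calls
    return quantiles if quantiles is not None else [0.1, 0.5, 0.9]

print(resolve_quantiles())        # [0.1, 0.5, 0.9]
print(resolve_quantiles([0.05]))  # [0.05]
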
Code Example #6
 def __init__(
     self,
     freq: str,
     prediction_length: int,
     cardinality: List[int],
     embedding_dimension: int,
     encoder_rnn_layer: int,
     encoder_rnn_num_hidden: int,
     decoder_mlp_layer: List[int],
     decoder_mlp_static_dim: int,
     encoder_rnn_model: str = "lstm",
     encoder_rnn_bidirectional: bool = True,
     scaler: Scaler = NOPScaler(),
     context_length: Optional[int] = None,
     quantiles: Optional[List[float]] = None,
     trainer: Trainer = Trainer(),
     num_parallel_samples: int = 100,
 ) -> None:
     encoder = RNNEncoder(
         mode=encoder_rnn_model,
         hidden_size=encoder_rnn_num_hidden,
         num_layers=encoder_rnn_layer,
         bidirectional=encoder_rnn_bidirectional,
         use_static_feat=True,
         use_dynamic_feat=True,
     )
     super(RNN2QRForecaster, self).__init__(
         freq=freq,
         prediction_length=prediction_length,
         encoder=encoder,
         cardinality=cardinality,
         embedding_dimension=embedding_dimension,
         decoder_mlp_layer=decoder_mlp_layer,
         decoder_mlp_static_dim=decoder_mlp_static_dim,
         context_length=context_length,
         scaler=scaler,
         quantiles=quantiles,
         trainer=trainer,
         num_parallel_samples=num_parallel_samples,
     )
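
With encoder_rnn_bidirectional=True, the encoder's output width doubles because the forward and backward passes are concatenated. A sketch of that shape effect with a plain mx.gluon.rnn.LSTM (illustrative sizes; the actual RNNEncoder additionally wires in static and dynamic features):

import mxnet as mx

rnn = mx.gluon.rnn.LSTM(hidden_size=16, num_layers=2,
                        bidirectional=True, layout="NTC")
rnn.initialize()
out = rnn(mx.nd.random.normal(shape=(2, 12, 4)))  # (batch, time, features)
print(out.shape)  # (2, 12, 32): forward and backward outputs concatenated
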
Code Example #7
    def __init__(
        self,
        encoder: Seq2SeqEncoder,
        enc2dec: Seq2SeqEnc2Dec,
        decoder: Seq2SeqDecoder,
        quantile_output: QuantileOutput,
        context_length: int,
        cardinality: List[int],
        embedding_dimension: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.encoder = encoder
        self.enc2dec = enc2dec
        self.decoder = decoder
        self.quantile_output = quantile_output
        self.context_length = context_length
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.scaling = scaling
        self.dtype = dtype

        if self.scaling:
            self.scaler = MeanScaler(keepdims=True)
        else:
            self.scaler = NOPScaler(keepdims=True)

        with self.name_scope():
            self.quantile_proj = quantile_output.get_quantile_proj()
            self.loss = quantile_output.get_loss()
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
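
MeanScaler and NOPScaler appear in nearly every example on this page. As a rough functional stand-in (not the gluon-ts classes): MeanScaler divides each series by the mean of its absolute values and returns that scale alongside the data, while NOPScaler returns the data unchanged with a scale of ones.

import mxnet as mx

def mean_scale(past_target):
    # scale each series by its mean absolute value over time (axis=1)
    scale = mx.nd.mean(mx.nd.abs(past_target), axis=1, keepdims=True) + 1e-10
    return past_target / scale, scale

scaled, scale = mean_scale(mx.nd.array([[10.0, 20.0, 30.0], [1.0, 2.0, 3.0]]))
print(scale.asnumpy().ravel())  # [20.  2.]: one scale per series (keepdims=True)
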
Code Example #8
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        cardinality: List[int],
        embedding_dimension: int,
        decoder_mlp_layer: List[int],
        decoder_mlp_static_dim: int,
        scaler: Scaler = NOPScaler(),
        context_length: Optional[int] = None,
        quantiles: Optional[List[float]] = None,
        trainer: Trainer = Trainer(),
        num_parallel_samples: int = 100,
    ) -> None:
        channels_seq = [30, 30, 30]
        encoder = HierarchicalCausalConv1DEncoder(
            dilation_seq=[1, 3, 9],
            kernel_size_seq=[3] * len(channels_seq),
            channels_seq=channels_seq,
            use_residual=True,
            use_dynamic_feat=True,
            use_static_feat=True,
        )

        super(CNN2QRForecaster, self).__init__(
            freq=freq,
            prediction_length=prediction_length,
            encoder=encoder,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            decoder_mlp_layer=decoder_mlp_layer,
            decoder_mlp_static_dim=decoder_mlp_static_dim,
            context_length=context_length,
            scaler=scaler,
            quantiles=quantiles,
            trainer=trainer,
            num_parallel_samples=num_parallel_samples,
        )
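
The encoder built here stacks causal Conv1D layers whose dilations grow as [1, 3, 9], widening the receptive field geometrically. One such layer in isolation, with causality enforced by left-padding (illustrative sizes; not the gluon-ts encoder):

import mxnet as mx
from mxnet.gluon import nn

kernel_size, dilation, channels = 3, 3, 30
conv = nn.Conv1D(channels, kernel_size, dilation=dilation)
conv.initialize()
x = mx.nd.random.normal(shape=(2, 1, 24))  # (batch, channels, time)
pad = mx.nd.zeros((2, 1, (kernel_size - 1) * dilation))
y = conv(mx.nd.concat(pad, x, dim=2))  # left-pad keeps the layer causal
print(y.shape)  # (2, 30, 24): output length matches the unpadded input
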
Code Example #9
File: _network.py Project: ymbkxc/gluon-ts
    def __init__(
        self,
        prediction_length: int,
        context_length: int,
        num_stacks: int,
        widths: List[int],
        num_blocks: List[int],
        num_block_layers: List[int],
        expansion_coefficient_lengths: List[int],
        sharing: List[bool],
        stack_types: List[str],
        scale: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_stacks = num_stacks
        self.widths = widths
        self.num_blocks = num_blocks
        self.num_block_layers = num_block_layers
        self.sharing = sharing
        self.expansion_coefficient_lengths = expansion_coefficient_lengths
        self.stack_types = stack_types
        self.prediction_length = prediction_length
        self.context_length = context_length

        if scale:
            self.scaler = MeanScaler(keepdims=True)
        else:
            self.scaler = NOPScaler(keepdims=True)

        with self.name_scope():
            self.net_blocks: List[NBEATSBlock] = []

            # connect all the blocks correctly
            for stack_id in range(num_stacks):
                for block_id in range(num_blocks[stack_id]):
                    # in case sharing is enabled for the stack
                    params = (
                        self.net_blocks[-1].collect_params()
                        if (block_id > 0 and sharing[stack_id])
                        else None
                    )
                    # only last one does not have backcast
                    has_backcast = not (
                        stack_id == num_stacks - 1
                        and block_id == num_blocks[num_stacks - 1] - 1
                    )
                    if self.stack_types[stack_id] == "G":
                        net_block = NBEATSGenericBlock(
                            width=self.widths[stack_id],
                            num_block_layers=self.num_block_layers[stack_id],
                            expansion_coefficient_length=self.expansion_coefficient_lengths[
                                stack_id
                            ],
                            prediction_length=prediction_length,
                            context_length=context_length,
                            has_backcast=has_backcast,
                            params=params,
                        )
                    elif self.stack_types[stack_id] == "S":
                        net_block = NBEATSSeasonalBlock(
                            width=self.widths[stack_id],
                            num_block_layers=self.num_block_layers[stack_id],
                            expansion_coefficient_length=self.expansion_coefficient_lengths[
                                stack_id
                            ],
                            prediction_length=prediction_length,
                            context_length=context_length,
                            has_backcast=has_backcast,
                            params=params,
                        )
                    else:  # self.stack_types[stack_id] == "T"
                        net_block = NBEATSTrendBlock(
                            width=self.widths[stack_id],
                            num_block_layers=self.num_block_layers[stack_id],
                            expansion_coefficient_length=self.expansion_coefficient_lengths[
                                stack_id
                            ],
                            prediction_length=prediction_length,
                            context_length=context_length,
                            has_backcast=has_backcast,
                            params=params,
                        )

                    self.net_blocks.append(net_block)
                    self.register_child(
                        net_block, f"block_{stack_id}_{block_id}"
                    )
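
The weight sharing relies on Gluon's params= argument: passing an earlier block's collect_params() makes the new block reuse the same Parameter objects instead of allocating its own. The mechanism in isolation:

import mxnet as mx
from mxnet.gluon import nn

first = nn.Dense(units=4)
shared = nn.Dense(units=4, params=first.collect_params())  # reuse weights
first.initialize()
x = mx.nd.ones((1, 3))
print((first(x) == shared(x)).asnumpy().all())  # True: one set of parameters
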
Code Example #10
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output_m: DistributionOutput,
        distr_output_q: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype

        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"

        assert len(
            set(lags_seq)) == len(lags_seq), "no duplicated lags allowed!"
        lags_seq.sort()

        self.lags_seq = lags_seq
        # 2 separate distributions
        self.distr_output_m = distr_output_m
        self.distr_output_q = distr_output_q
        RnnCell = {
            "lstm": mx.gluon.rnn.LSTMCell,
            "gru": mx.gluon.rnn.GRUCell
        }[self.cell_type]

        self.target_shape = distr_output_m.event_shape

        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"

        with self.name_scope():
            self.proj_distr_args_m = self.distr_output_m.get_args_proj(
                prefix="m")
            self.proj_distr_args_q = self.distr_output_q.get_args_proj(
                prefix="q")
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (mx.gluon.rnn.ZoneoutCell(cell,
                                                 zoneout_states=dropout_rate)
                        if dropout_rate > 0.0 else cell)
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
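
The RnnCell dict is a small dispatch table mapping the cell_type string to a Gluon cell class; both classes expose the same call convention, so the rest of the constructor does not care which was chosen. Isolated, with illustrative sizes:

import mxnet as mx

cell_type = "gru"
RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[cell_type]
cell = RnnCell(hidden_size=8)
cell.initialize()
out, states = cell(mx.nd.random.normal(shape=(2, 4)),
                   cell.begin_state(batch_size=2))
print(out.shape)  # (2, 8), regardless of which cell class was picked
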
Code Example #11
File: _network.py Project: michael135/gluon-ts
 def __init__(
     self,
     num_series: int,
     channels: int,
     kernel_size: int,
     rnn_cell_type: str,
     rnn_num_layers: int,
     rnn_num_cells: int,
     skip_rnn_cell_type: str,
     skip_rnn_num_layers: int,
     skip_rnn_num_cells: int,
     skip_size: int,
     ar_window: int,
     context_length: int,
     lead_time: int,
     prediction_length: int,
     dropout_rate: float,
     output_activation: Optional[str],
     scaling: bool,
     dtype: DType,
     *args,
     **kwargs,
 ) -> None:
     super().__init__(*args, **kwargs)
     self.num_series = num_series
     self.channels = channels
     assert (
         channels % skip_size == 0
     ), "number of conv2d `channels` must be divisible by the `skip_size`"
     self.skip_size = skip_size
     assert (ar_window >
             0), "auto-regressive window must be a positive integer"
     self.ar_window = ar_window
      assert lead_time >= 0, "`lead_time` must be non-negative"
     assert (prediction_length >
             0), "`prediction_length` must be greater than zero"
     self.prediction_length = prediction_length
     self.horizon = lead_time
     assert context_length > 0, "`context_length` must be greater than zero"
     self.context_length = context_length
     if output_activation is not None:
         assert output_activation in [
             "sigmoid",
             "tanh",
         ], "`output_activation` must be either 'sigmiod' or 'tanh' "
     self.output_activation = output_activation
     assert rnn_cell_type in [
         "gru",
         "lstm",
     ], "`rnn_cell_type` must be either 'gru' or 'lstm' "
     assert skip_rnn_cell_type in [
         "gru",
         "lstm",
     ], "`skip_rnn_cell_type` must be either 'gru' or 'lstm' "
     self.conv_out = context_length - kernel_size + 1
     self.conv_skip = self.conv_out // skip_size
     assert self.conv_skip > 0, (
         "conv2d output size must be greater than or equal to `skip_size`\n"
         "Choose a smaller `kernel_size` or bigger `context_length`")
     self.channel_skip_count = self.conv_skip * skip_size
     self.dtype = dtype
     with self.name_scope():
         self.cnn = nn.Conv2D(
             channels,
             (num_series, kernel_size),
             activation="relu",
             layout="NCHW",
             in_channels=1,
         )  # NC1T
         self.cnn.cast(dtype)
         self.dropout = nn.Dropout(dropout_rate)
         self.rnn = self._create_rnn_layer(rnn_num_cells, rnn_num_layers,
                                           rnn_cell_type,
                                           dropout_rate)  # NTC
         self.rnn.cast(dtype)
         self.skip_rnn_num_cells = skip_rnn_num_cells
         self.skip_rnn = self._create_rnn_layer(
             skip_rnn_num_cells,
             skip_rnn_num_layers,
             skip_rnn_cell_type,
             dropout_rate,
         )  # NTC
         self.skip_rnn.cast(dtype)
         # TODO: add temporal attention option
         self.fc = nn.Dense(num_series, dtype=dtype)
         self.ar_fc = nn.Dense(prediction_length,
                               dtype=dtype,
                               flatten=False)
         if scaling:
             self.scaler = MeanScaler(axis=2, keepdims=True)
         else:
             self.scaler = NOPScaler(axis=2, keepdims=True)
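
The bookkeeping conv_out = context_length - kernel_size + 1 matches what the Conv2D layer actually produces: its kernel spans all num_series rows at once, collapsing the height to 1 (the "NC1T" comment above). A shape check with illustrative sizes:

import mxnet as mx
from mxnet.gluon import nn

num_series, context_length, kernel_size, channels = 5, 24, 6, 30
cnn = nn.Conv2D(channels, (num_series, kernel_size), activation="relu",
                layout="NCHW", in_channels=1)
cnn.initialize()
x = mx.nd.random.normal(shape=(2, 1, num_series, context_length))
print(cnn(x).shape)  # (2, 30, 1, 19): conv_out = 24 - 6 + 1 = 19
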
Code Example #12
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        past_length: int,
        prediction_length: int,
        issm: ISSM,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        scaling: bool = True,
        noise_std_bounds: ParameterBounds = ParameterBounds(1e-6, 1.0),
        prior_cov_bounds: ParameterBounds = ParameterBounds(1e-6, 1.0),
        innovation_bounds: ParameterBounds = ParameterBounds(1e-6, 0.01),
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.past_length = past_length
        self.prediction_length = prediction_length
        self.issm = issm
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling

        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        self.univariate = self.issm.output_dim() == 1

        self.noise_std_bounds = noise_std_bounds
        self.prior_cov_bounds = prior_cov_bounds
        self.innovation_bounds = innovation_bounds

        with self.name_scope():
            self.prior_mean_model = mx.gluon.nn.Dense(
                units=self.issm.latent_dim(), flatten=False
            )
            self.prior_cov_diag_model = mx.gluon.nn.Dense(
                units=self.issm.latent_dim(),
                activation="sigmoid",
                flatten=False,
            )
            self.lstm = mx.gluon.rnn.HybridSequentialRNNCell()
            self.lds_proj = LDSArgsProj(
                output_dim=self.issm.output_dim(),
                noise_std_bounds=self.noise_std_bounds,
                innovation_bounds=self.innovation_bounds,
            )
            for k in range(num_layers):
                cell = mx.gluon.rnn.LSTMCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.lstm.add(cell)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality, embedding_dims=embedding_dimension
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=False)
            else:
                self.scaler = NOPScaler(keepdims=False)
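
Compared with Code Example #3, this version resolves the TODO about an explicit upper bound: each ParameterBounds pair constrains a network output to a fixed interval. A hypothetical helper showing the usual sigmoid-based mapping (the exact transform inside gluon-ts may differ):

import mxnet as mx

def bounded(raw, lower, upper):
    # squash an unconstrained value into (lower, upper) via a scaled sigmoid
    return lower + mx.nd.sigmoid(raw) * (upper - lower)

print(bounded(mx.nd.array([-10.0, 0.0, 10.0]), 1e-6, 1.0).asnumpy())
# values land strictly inside (1e-6, 1.0), like the noise_std bounds above
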
Code Example #13
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        lags_seq: List[int],
        target_dim: int,
        conditioning_length: int,
        cardinality: List[int] = [1],
        embedding_dimension: int = 1,
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.target_dim = target_dim
        self.scaling = scaling
        self.target_dim_sample = target_dim
        self.conditioning_length = conditioning_length

        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        lags_seq.sort()

        self.lags_seq = lags_seq

        self.distr_output = distr_output

        with self.name_scope():
            self.proj_dist_args = distr_output.get_args_proj()

            residual = True

            self.rnn = make_rnn_cell(
                cell_type=cell_type,
                num_cells=num_cells,
                num_layers=num_layers,
                residual=residual,
                dropout_rate=dropout_rate,
            )

            self.embed_dim = 1
            self.embed = mx.gluon.nn.Embedding(
                input_dim=self.target_dim, output_dim=self.embed_dim
            )

            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
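
Here the embedding identifies which of the target_dim series a value belongs to: each series gets a learned embed_dim-wide (here 1-wide) code. In isolation, with illustrative sizes:

import mxnet as mx

target_dim, embed_dim = 5, 1
embed = mx.gluon.nn.Embedding(input_dim=target_dim, output_dim=embed_dim)
embed.initialize()
index = mx.nd.arange(target_dim)  # one index per series in the multivariate target
print(embed(index).shape)  # (5, 1): a learned scalar identifier per series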