Example #1
    def __init__(
            self,
            encoder: Seq2SeqEncoder,
            context_length: Optional[int],
            prediction_length: int,
            freq: str,
            # FIXME: why do we have two parameters here?
            mlp_final_dim: int = 20,
            mlp_hidden_dimension_seq: List[int] = list(),
            quantiles: List[float] = list(),
            trainer: Trainer = Trainer(),
    ) -> None:
        context_length = (
            prediction_length if context_length is None else context_length
        )
        assert all(
            d > 0 for d in mlp_hidden_dimension_seq
        ), "Elements of `mlp_hidden_dimension_seq` should be > 0"

        decoder = ForkingMLPDecoder(
            dec_len=prediction_length,
            final_dim=mlp_final_dim,
            hidden_dimension_sequence=mlp_hidden_dimension_seq,
            prefix="decoder_",
        )

        quantile_output = QuantileOutput(quantiles)

        super(MQDNNEstimator, self).__init__(
            encoder=encoder,
            decoder=decoder,
            quantile_output=quantile_output,
            freq=freq,
            prediction_length=prediction_length,
            context_length=context_length,
            trainer=trainer,
        )
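This __init__ expects a ready-made Seq2SeqEncoder. Below is a minimal, hypothetical subclass sketch of how a concrete variant could fix the encoder and delegate to it, borrowing the GRU encoder configuration from Example #3; the import path is an assumption and differs across GluonTS versions.

from gluonts.block.encoder import RNNEncoder  # newer GluonTS: gluonts.mx.block.encoder

# Hypothetical concrete estimator: fixes the encoder, everything else is
# forwarded to the MQDNNEstimator.__init__ shown above.
class MyMQRNNEstimator(MQDNNEstimator):
    def __init__(self, prediction_length: int, freq: str, **kwargs) -> None:
        encoder = RNNEncoder(
            mode="gru",
            hidden_size=50,
            num_layers=1,
            bidirectional=True,
            prefix="encoder_",
        )
        super().__init__(
            encoder=encoder,
            context_length=None,  # falls back to prediction_length
            prediction_length=prediction_length,
            freq=freq,
            **kwargs,
        )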
Example #2
    def __init__(
        self,
        encoder: Seq2SeqEncoder,
        enc2dec: Seq2SeqEnc2Dec,
        decoder: Seq2SeqDecoder,
        quantile_output: QuantileOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.encoder = encoder
        self.enc2dec = enc2dec
        self.decoder = decoder
        self.quantile_output = quantile_output

        with self.name_scope():
            self.quantile_proj = quantile_output.get_quantile_proj()
            self.loss = quantile_output.get_loss()
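The stored blocks get wired together in the network's forward pass. The following is a simplified, illustrative hybrid_forward showing only the data flow encoder -> enc2dec -> decoder -> quantile projection -> loss; argument names and shapes are assumptions, and the real GluonTS Seq2SeqTrainingNetwork passes additional feature tensors.

    # Illustrative only, not the actual GluonTS signature.
    def hybrid_forward(self, F, past_target, past_features, future_features, future_target):
        enc_static, enc_dynamic = self.encoder(past_target, past_features)
        dec_static, dec_dynamic = self.enc2dec(enc_static, enc_dynamic, future_features)
        dec_output = self.decoder(dec_dynamic, dec_static)
        predictions = self.quantile_proj(dec_output)    # one value per quantile level
        return self.loss(future_target, predictions)    # quantile (pinball) loss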
Example #3
    def __init__(
        self,
        prediction_length: int,
        freq: str,
        context_length: Optional[int] = None,
        decoder_mlp_dim_seq: Optional[List[int]] = None,
        trainer: Trainer = Trainer(),
        quantiles: Optional[List[float]] = None,
        scaling: bool = True,
    ) -> None:

        assert (
            prediction_length > 0
        ), f"Invalid prediction length: {prediction_length}."
        assert decoder_mlp_dim_seq is None or all(
            d > 0 for d in decoder_mlp_dim_seq
        ), "Elements of `decoder_mlp_dim_seq` should be > 0"
        assert quantiles is None or all(
            0 <= d <= 1 for d in quantiles
        ), "Elements of `quantiles` should be >= 0 and <= 1"

        self.decoder_mlp_dim_seq = (
            decoder_mlp_dim_seq if decoder_mlp_dim_seq is not None else [30]
        )
        self.quantiles = (
            quantiles
            if quantiles is not None
            else [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        )

        # `use_static_feat` and `use_dynamic_feat` are always True because the
        # network always receives some input, either from the data or from constants.
        encoder = RNNEncoder(
            mode="gru",
            hidden_size=50,
            num_layers=1,
            bidirectional=True,
            prefix="encoder_",
            use_static_feat=True,
            use_dynamic_feat=True,
        )

        decoder = ForkingMLPDecoder(
            dec_len=prediction_length,
            final_dim=self.decoder_mlp_dim_seq[-1],
            hidden_dimension_sequence=self.decoder_mlp_dim_seq[:-1],
            prefix="decoder_",
        )

        quantile_output = QuantileOutput(self.quantiles)

        super().__init__(
            encoder=encoder,
            decoder=decoder,
            quantile_output=quantile_output,
            freq=freq,
            prediction_length=prediction_length,
            context_length=context_length,
            trainer=trainer,
            scaling=scaling,
        )
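An end-to-end usage sketch, assuming this constructor belongs to MQRNNEstimator from gluonts.model.seq2seq (it builds a bidirectional GRU encoder internally); the toy dataset and import paths are illustrative.

from gluonts.dataset.common import ListDataset
from gluonts.model.seq2seq import MQRNNEstimator  # assumption about the class shown above
from gluonts.trainer import Trainer  # newer GluonTS: gluonts.mx.trainer

# One synthetic hourly series with a daily pattern.
train_ds = ListDataset(
    [{"start": "2021-01-01 00:00:00", "target": [float(i % 24) for i in range(400)]}],
    freq="H",
)

estimator = MQRNNEstimator(
    prediction_length=24,
    freq="H",
    quantiles=[0.1, 0.5, 0.9],
    trainer=Trainer(epochs=5),
)
predictor = estimator.train(train_ds)

for forecast in predictor.predict(train_ds):
    print(forecast.quantile(0.5))  # median forecast for the 24-step horizon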
Example #4
    def __init__(
        self,
        encoder: Seq2SeqEncoder,
        enc2dec: Seq2SeqEnc2Dec,
        decoder: Seq2SeqDecoder,
        quantile_output: QuantileOutput,
        context_length: int,
        cardinality: List[int],
        embedding_dimension: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.encoder = encoder
        self.enc2dec = enc2dec
        self.decoder = decoder
        self.quantile_output = quantile_output
        self.context_length = context_length
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.scaling = scaling
        self.dtype = dtype

        if self.scaling:
            self.scaler = MeanScaler(keepdims=True)
        else:
            self.scaler = NOPScaler(keepdims=True)

        with self.name_scope():
            self.quantile_proj = quantile_output.get_quantile_proj()
            self.loss = quantile_output.get_loss()
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
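For intuition, a minimal plain-MXNet sketch of what mean scaling with keepdims=True amounts to; the real MeanScaler also honors an observed-values indicator and guards against all-zero series, while NOPScaler returns the target unchanged with a scale of ones.

import mxnet as mx

target = mx.nd.array([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])  # (batch, time)
scale = target.mean(axis=1, keepdims=True)                   # (batch, 1), kept for rescaling forecasts
scaled_target = target / scale                               # comparable magnitude across series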
Example #5
    def create_training_network(self) -> mx.gluon.HybridBlock:
        distribution = QuantileOutput(self.quantiles)

        enc2dec = PassThroughEnc2Dec()
        decoder = OneShotDecoder(
            decoder_length=self.prediction_length,
            layer_sizes=self.decoder_mlp_layer,
            static_outputs_per_time_step=self.decoder_mlp_static_dim,
        )

        training_network = Seq2SeqTrainingNetwork(
            embedder=self.embedder,
            scaler=self.scaler,
            encoder=self.encoder,
            enc2dec=enc2dec,
            decoder=decoder,
            quantile_output=distribution,
        )

        return training_network
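The loss attached via quantile_output.get_loss() is the quantile (pinball) loss. A minimal single-quantile sketch follows; the real block vectorizes over all quantile levels and the forecast horizon.

import mxnet as mx

def pinball_loss(y_true, y_pred, q):
    # Under-prediction is penalized with weight q, over-prediction with weight (1 - q).
    diff = y_true - y_pred
    return mx.nd.maximum(q * diff, (q - 1) * diff)

print(pinball_loss(mx.nd.array([10.0]), mx.nd.array([8.0]), 0.9))   # 1.8: predicted too low
print(pinball_loss(mx.nd.array([10.0]), mx.nd.array([12.0]), 0.9))  # 0.2: predicted too high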
Example #6
    def __init__(
            self,
            freq: str,
            prediction_length: int,
            context_length: Optional[int] = None,
            use_feat_dynamic_real: bool = False,
            use_feat_static_cat: bool = False,
            cardinality: Optional[List[int]] = None,
            embedding_dimension: Optional[List[int]] = None,
            add_time_feature: bool = False,
            add_age_feature: bool = False,
            enable_decoder_dynamic_feature: bool = False,
            seed: Optional[int] = None,
            decoder_mlp_dim_seq: Optional[List[int]] = None,
            channels_seq: Optional[List[int]] = None,
            dilation_seq: Optional[List[int]] = None,
            kernel_size_seq: Optional[List[int]] = None,
            use_residual: bool = True,
            quantiles: Optional[List[float]] = None,
            trainer: Trainer = Trainer(),
            scaling: bool = False,
    ) -> None:

        assert (
            prediction_length > 0
        ), f"Invalid prediction length: {prediction_length}."
        assert decoder_mlp_dim_seq is None or all(
            d > 0 for d in decoder_mlp_dim_seq
        ), "Elements of `decoder_mlp_dim_seq` should be > 0"
        assert channels_seq is None or all(
            d > 0 for d in channels_seq
        ), "Elements of `channels_seq` should be > 0"
        assert dilation_seq is None or all(
            d > 0 for d in dilation_seq
        ), "Elements of `dilation_seq` should be > 0"
        # TODO: add support for kernel size=1
        assert kernel_size_seq is None or all(
            d > 1 for d in kernel_size_seq
        ), "Elements of `kernel_size_seq` should be > 1"
        assert quantiles is None or all(
            0 <= d <= 1 for d in quantiles
        ), "Elements of `quantiles` should be >= 0 and <= 1"

        self.decoder_mlp_dim_seq = (
            decoder_mlp_dim_seq if decoder_mlp_dim_seq is not None else [30]
        )
        self.channels_seq = (
            channels_seq if channels_seq is not None else [30, 30, 30]
        )
        self.dilation_seq = (
            dilation_seq if dilation_seq is not None else [1, 3, 5]
        )
        self.kernel_size_seq = (
            kernel_size_seq if kernel_size_seq is not None else [7, 3, 3]
        )
        self.quantiles = (
            quantiles
            if quantiles is not None
            else [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        )

        assert (
            len(self.channels_seq)
            == len(self.dilation_seq)
            == len(self.kernel_size_seq)
        ), (
            f"Mismatched CNN configurations: {len(self.channels_seq)} vs. "
            f"{len(self.dilation_seq)} vs. {len(self.kernel_size_seq)}"
        )

        if seed is not None:
            np.random.seed(seed)
            mx.random.seed(seed)

        # `use_static_feat` and `use_dynamic_feat` are always True because the
        # network always receives some input, either from the data or from constants.
        encoder = HierarchicalCausalConv1DEncoder(
            dilation_seq=self.dilation_seq,
            kernel_size_seq=self.kernel_size_seq,
            channels_seq=self.channels_seq,
            use_residual=use_residual,
            use_static_feat=True,
            use_dynamic_feat=True,
            prefix="encoder_",
        )

        decoder = ForkingMLPDecoder(
            dec_len=prediction_length,
            final_dim=self.decoder_mlp_dim_seq[-1],
            hidden_dimension_sequence=self.decoder_mlp_dim_seq[:-1],
            prefix="decoder_",
        )

        quantile_output = QuantileOutput(self.quantiles)

        super().__init__(
            encoder=encoder,
            decoder=decoder,
            quantile_output=quantile_output,
            freq=freq,
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_dynamic_real=use_feat_dynamic_real,
            use_feat_static_cat=use_feat_static_cat,
            enable_decoder_dynamic_feature=enable_decoder_dynamic_feature,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            add_time_feature=add_time_feature,
            add_age_feature=add_age_feature,
            trainer=trainer,
            scaling=scaling,
        )
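A hedged instantiation sketch for this CNN-based variant, assuming it is MQCNNEstimator from gluonts.model.seq2seq; parameter values are illustrative.

from gluonts.model.seq2seq import MQCNNEstimator  # assumption about the class shown above
from gluonts.trainer import Trainer  # newer GluonTS: gluonts.mx.trainer

estimator = MQCNNEstimator(
    freq="H",
    prediction_length=24,
    context_length=4 * 24,
    add_time_feature=True,
    add_age_feature=True,
    # Three dilated causal conv layers; the three lists must have equal
    # length (see the assertion above).
    channels_seq=[30, 30, 30],
    dilation_seq=[1, 3, 9],
    kernel_size_seq=[7, 3, 3],
    quantiles=[0.1, 0.5, 0.9],
    trainer=Trainer(epochs=10),
    seed=42,
)
# Training and prediction then follow the same estimator.train(dataset)
# pattern shown after Example #3.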