Example #1
    @classmethod
    def deserialize(
            cls,
            path: Path,
            ctx: Optional[mx.Context] = None) -> "SymbolBlockPredictor":
        ctx = ctx if ctx is not None else get_mxnet_context()

        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())

            parameters["ctx"] = ctx

            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())

            # deserialize prediction network
            num_inputs = len(parameters["input_names"])
            prediction_net = import_symb_block(num_inputs, path,
                                               "prediction_net")

            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
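A minimal usage sketch for the method above (the model directory path is hypothetical; it stands for a directory that already contains parameters.json, input_transform.json, and the exported prediction_net files):

    from pathlib import Path
    import mxnet as mx

    model_dir = Path("/tmp/my_predictor")  # hypothetical serialized-model directory
    predictor = SymbolBlockPredictor.deserialize(model_dir, ctx=mx.cpu())
    # Omitting ctx falls back to get_mxnet_context().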
Example #2
    @classmethod
    def deserialize(
            cls,
            path: Path,
            ctx: Optional[mx.Context] = None) -> "RepresentableBlockPredictor":
        ctx = ctx if ctx is not None else get_mxnet_context()

        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())

            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())

            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")

            # input_names is derived from the prediction_net
            if "input_names" in parameters:
                del parameters["input_names"]

            parameters["ctx"] = ctx

            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
Example #3
 def initialize_from_dataset(self,
                             input_dataset: Dataset,
                             ctx: mx.Context = get_mxnet_context()):
     # Rescale all time series in the training set.
     train_target_sequence = np.array([])
     for train_entry in input_dataset:
         train_entry_target = train_entry["target"]
         train_tar_mean = np.mean(train_entry_target)
         train_entry_target /= train_tar_mean
         train_target_sequence = np.concatenate(
             [train_target_sequence, train_entry_target])
     self.initialize_from_array(train_target_sequence, ctx)
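A tiny hand-worked sketch of the rescaling above, with made-up data: each series is divided by its own mean before concatenation, so every rescaled series has mean 1.

    import numpy as np

    s1 = np.array([2.0, 4.0])    # mean 3.0  -> [0.667, 1.333]
    s2 = np.array([10.0, 10.0])  # mean 10.0 -> [1.0, 1.0]
    combined = np.concatenate([s1 / s1.mean(), s2 / s2.mean()])
    # combined ~ [0.667, 1.333, 1.0, 1.0]; the overall mean is 1.0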
Example #4
    def __init__(
        self,
        ctx: Optional[mx.Context] = None,
        epochs: int = 100,
        batch_size: int = 32,
        num_batches_per_epoch: int = 50,
        learning_rate: float = 1e-3,
        learning_rate_decay_factor: float = 0.5,
        patience: int = 10,
        minimum_learning_rate: float = 5e-5,
        clip_gradient: float = 10.0,
        weight_decay: float = 1e-8,
        init: Union[str, mx.initializer.Initializer] = "xavier",
        hybridize: bool = True,
        avg_strategy: AveragingStrategy = SelectNBestMean(num_models=1),
    ) -> None:

        assert (0 <= epochs <
                float("inf")), "The value of `epochs` should be >= 0"
        assert 0 < batch_size, "The value of `batch_size` should be > 0"
        assert (0 < num_batches_per_epoch
                ), "The value of `num_batches_per_epoch` should be > 0"
        assert (0 < learning_rate <
                float("inf")), "The value of `learning_rate` should be > 0"
        assert (
            0 <= learning_rate_decay_factor < 1
        ), "The value of `learning_rate_decay_factor` should be in the [0, 1) range"
        assert 0 <= patience, "The value of `patience` should be >= 0"
        assert (0 <= minimum_learning_rate
                ), "The value of `minimum_learning_rate` should be >= 0"
        assert 0 < clip_gradient, "The value of `clip_gradient` should be > 0"
        assert 0 <= weight_decay, "The value of `weight_decay` should be >= 0"

        self.epochs = epochs
        self.batch_size = batch_size
        self.num_batches_per_epoch = num_batches_per_epoch
        self.learning_rate = learning_rate
        self.learning_rate_decay_factor = learning_rate_decay_factor
        self.patience = patience
        self.minimum_learning_rate = minimum_learning_rate
        self.clip_gradient = clip_gradient
        self.weight_decay = weight_decay
        self.init = init
        self.hybridize = hybridize
        self.avg_strategy = avg_strategy
        self.ctx = ctx if ctx is not None else get_mxnet_context()
        self.halt = False

        # Insert Lookahead optimizers...
        self.__all__ = []
        self._register_lookahead_opt()
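A usage sketch, assuming this __init__ belongs to a Trainer-style configuration class (the name Trainer below is an assumption, not confirmed by the snippet):

    trainer = Trainer(  # hypothetical class name
        epochs=20,
        batch_size=64,
        num_batches_per_epoch=100,
        learning_rate=1e-3,
        hybridize=True,
    )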
Example #5
    def initialize_from_array(
        self, input_array: np.ndarray, ctx: mx.Context = get_mxnet_context()
    ):
        r"""
        Initialize the representation based on a numpy array.

        Parameters
        ----------
        input_array
            Numpy array.
        ctx
            MXNet context.
        """
        pass
Example #6
    def initialize_from_dataset(
        self, input_dataset: Dataset, ctx: mx.Context = get_mxnet_context()
    ):
        r"""
        Initialize the representation based on an entire dataset.

        Parameters
        ----------
        input_dataset
            GluonTS dataset.
        ctx
            MXNet context.
        """
        pass
Example #7
    def initialize_from_array(self,
                              input_array: np.ndarray,
                              ctx: mx.Context = get_mxnet_context()):
        # Calculate bin centers and bin edges using linear or quantile binning.
        if self.is_quantile:
            bin_centers = np.quantile(
                input_array,
                np.linspace(0, self.quantile_scaling_limit, self.num_bins),
            )
            bin_centers = ensure_binning_monotonicity(bin_centers)
        else:
            has_negative_data = np.any(input_array < 0)
            low = -self.linear_scaling_limit if has_negative_data else 0
            high = self.linear_scaling_limit
            bin_centers = np.linspace(low, high, self.num_bins)
        bin_edges = bin_edges_from_bin_centers(bin_centers)

        # Store bin centers and edges, since they are globally applicable to all time series.
        with ctx:
            self.bin_edges.initialize()
            self.bin_centers.initialize()
        self.bin_edges.set_data(mx.nd.array(bin_edges))
        self.bin_centers.set_data(mx.nd.array(bin_centers))
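The helper bin_edges_from_bin_centers is not shown above. One plausible implementation, as a sketch only (not necessarily the library's actual helper), places each inner edge at the midpoint between consecutive centers and leaves the outermost bins open-ended:

    import numpy as np

    def bin_edges_from_bin_centers_sketch(bin_centers: np.ndarray) -> np.ndarray:
        # Inner edges sit halfway between neighbouring centers; the first and
        # last edges are unbounded so that every value falls into some bin.
        inner = (bin_centers[:-1] + bin_centers[1:]) / 2.0
        return np.concatenate([[-np.inf], inner, [np.inf]])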
Example #8
 def initialize_from_array(
     self, input_array: np.ndarray, ctx: mx.Context = get_mxnet_context()
 ):
     for representation in self.representations:
         representation.initialize_from_array(input_array, ctx)
Example #9
 def initialize_from_dataset(
     self, input_dataset: Dataset, ctx: mx.Context = get_mxnet_context()
 ):
     for representation in self.representations:
         representation.initialize_from_dataset(input_dataset, ctx)
Example #10
 def initialize_from_array(self,
                           input_array: np.ndarray,
                           ctx: mx.Context = get_mxnet_context()):
     with ctx:
         self.bin_edges.initialize()
         self.bin_centers.initialize()
Example #11
 def initialize_from_dataset(self,
                             input_dataset: Dataset,
                             ctx: mx.Context = get_mxnet_context()):
     self.initialize_from_array(np.array([]), ctx)