Code Example #1
    def process_batch(self, key, y_pred, y, cache=None):
        _check_input_dimensions(y_pred, y)
        if hasattr(self.model, "_post_process_prediction"):
            y_pred = self.model._post_process_prediction(y_pred, key=key)

        if self.tensor_backend is None:
            self.tensor_backend = get_tensor_backend(y_pred)
        xp = self.tensor_backend

        # Get the posterior mean from the cache,
        # or compute it if it is not cached yet.
        if cache is not None and "y_mean" in cache:
            y_mean = cache["y_mean"]
        else:
            y_mean = self.model.posterior_mean(y_pred=y_pred, key=key)
            if cache is not None:
                cache["y_mean"] = y_mean

        y_mean = xp.to_numpy(y_mean)
        y = xp.to_numpy(y)

        if self.mask is not None:
            if hasattr(self.model, "quantile_axis"):
                dist_axis = self.model.quantile_axis
            else:
                dist_axis = self.model.bin_axis
            if len(y.shape) > len(y_mean.shape):
                y = y.squeeze(dist_axis)
            y_mean = y_mean[y > self.mask]
            y = y[y > self.mask]

        self.y_pred.setdefault(key, []).append(y_mean.ravel())
        self.y.setdefault(key, []).append(y.ravel())
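
The per-key dictionaries above collect the flattened posterior means and targets from every batch; the optional mask drops pixels whose target is at or below self.mask before flattening. A minimal NumPy sketch of that filter-and-accumulate pattern (standalone illustration; the key name "rain_rate" is made up, not a quantnn identifier):

    import numpy as np

    y_pred_store, y_store = {}, {}
    mask = 0.0                                   # plays the role of self.mask

    y_mean = np.array([[0.5, 1.2], [2.0, 3.1]])  # posterior means for one batch
    y = np.array([[-1.0, 1.0], [2.5, 3.0]])      # targets; values <= mask are invalid

    valid = y > mask                             # boolean mask of valid pixels
    y_pred_store.setdefault("rain_rate", []).append(y_mean[valid].ravel())
    y_store.setdefault("rain_rate", []).append(y[valid].ravel())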
Code Example #2
    def process_batch(self, key, y_pred, y, cache=None):
        _check_input_dimensions(y_pred, y)
        if hasattr(self.model, "_post_process_prediction"):
            y_pred = self.model._post_process_prediction(y_pred, key=key)

        self.keys.add(key)

        if self.tensor_backend is None:
            self.tensor_backend = get_tensor_backend(y_pred)
        xp = self.tensor_backend

        crps = self.model.crps(y_pred=y_pred, y_true=y, key=key)
        if crps is None:
            return None

        crps_batches = self.crps.setdefault(key, [])
        crps = xp.to_numpy(crps)
        y = xp.to_numpy(y)

        if self.mask is not None:
            if hasattr(self.model, "quantile_axis"):
                dist_axis = self.model.quantile_axis
            else:
                dist_axis = self.model.bin_axis
            if len(y.shape) > len(crps.shape):
                y = y.squeeze(dist_axis)

            crps = crps[y > self.mask]

        crps_batches.append(crps.ravel())
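
For quantile predictions the CRPS can be approximated by averaging the pinball (quantile) loss over the quantile levels, using the identity that the CRPS equals twice the integral of the pinball loss over tau in [0, 1]. A minimal NumPy approximation of the metric itself (an illustration, not quantnn's model.crps implementation):

    import numpy as np

    def crps_from_quantiles(quantiles, taus, y_true):
        # Pinball loss at each quantile level tau.
        indicator = (y_true < quantiles).astype(float)
        pinball = (taus - indicator) * (y_true - quantiles)
        # CRPS is twice the integral of the pinball loss over tau in [0, 1];
        # with equidistant taus covering (0, 1) the mean is a good approximation.
        return 2.0 * pinball.mean()

    taus = np.linspace(0.05, 0.95, 19)
    quantiles = np.quantile(np.random.normal(size=100_000), taus)
    print(crps_from_quantiles(quantiles, taus, y_true=0.3))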
Code Example #3
File: tensor.py   Project: simonpf/quantnn
    def to_tensor(cls, tensor, like=None):
        """
        Convert a tensor to a tensor of the given tensor backend.

        If the tensor is from another tensor backend, it will be converted
        to the tensor backend corresponding to this class by converting
        it to a numpy array and from that to a tensor of this backend.

        If the tensor is already a tensor of this backend, it will directly
        return the tensor, or, if ``like`` is given, convert it to a tensor
        that is compatible with the tensor ``like``.
        """
        from quantnn.backends import get_tensor_backend

        if type(tensor).__module__ in ["numpy", "numpy.ma"]:
            return cls.from_numpy(tensor, like)

        backend = get_tensor_backend(tensor)
        if backend == cls:
            if like is not None:
                return cls.as_type(tensor, like)
            else:
                return tensor
        array = backend.to_numpy(tensor)
        return cls.from_numpy(array, like)
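
Restated as a standalone helper, the routing above has three branches: NumPy arrays are converted directly, tensors already native to the target backend are returned (optionally retyped via as_type when like is given), and foreign tensors take a round trip through NumPy. A sketch of the same logic, assuming only the backend methods already used above (from_numpy, as_type, to_numpy):

    from quantnn.backends import get_tensor_backend

    def convert_tensor(tensor, target_backend, like=None):
        if type(tensor).__module__ in ("numpy", "numpy.ma"):
            return target_backend.from_numpy(tensor, like)          # NumPy input
        source_backend = get_tensor_backend(tensor)
        if source_backend == target_backend:                        # already native
            return target_backend.as_type(tensor, like) if like is not None else tensor
        array = source_backend.to_numpy(tensor)                     # foreign backend:
        return target_backend.from_numpy(array, like)               # round trip via NumPy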
Code Example #4
File: transformations.py   Project: simonpf/quantnn
    def __call__(self, x):
        """
        Transform tensor.

        Args:
            x: Tensor containing the values to transform.

        Return:
            Tensor containing the transformed values.

        """
        if self.xp is None:
            xp = get_tensor_backend(x)
            self.xp = xp
        else:
            xp = self.xp
        return xp.log(x.double()).float()
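
The double()/float() round trip evaluates the logarithm in 64-bit precision and casts the result back to 32 bits; with a NumPy array the equivalent would be (standalone illustration, not quantnn code):

    import numpy as np

    x = np.array([0.1, 1.0, 10.0], dtype=np.float32)
    y = np.log(x.astype(np.float64)).astype(np.float32)  # log in double precision, result as float32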
Code Example #5
File: transformations.py   Project: simonpf/quantnn
    def invert(self, y):
        """
        Transform transformed values back to original space.

        Args:
            y: Tensor containing the transformed values to transform
                back.

        Returns:
            Tensor containing the original values.
        """
        if self.xp is None:
            xp = get_tensor_backend(y)
            self.xp = xp
        else:
            xp = self.xp
        return xp.exp(np.log(10) * y.double()).float()
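
Because exp(log(10) * y) equals 10**y, this invert undoes a base-10 logarithm. A quick NumPy check (standalone illustration):

    import numpy as np

    y = np.log10(np.array([0.5, 2.0, 100.0]))
    x = np.exp(np.log(10) * y)                  # identical to 10.0 ** y
    print(np.allclose(x, [0.5, 2.0, 100.0]))    # True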
Code Example #6
File: transformations.py   Project: simonpf/quantnn
    def invert(self, y):
        """
        Transform transformed values back to original space.

        Args:
            y: Tensor containing the transformed values to transform
                back.

        Returns:
            Tensor containing the original values.
        """
        if self.xp is None:
            xp = get_tensor_backend(y)
            self.xp = xp
        else:
            xp = self.xp
        return xp.where(y > 10, y, xp.log(xp.exp(y) + 1.0))
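
xp.log(xp.exp(y) + 1.0) is the softplus function; for y > 10 the difference log(exp(y) + 1) - y = log(1 + exp(-y)) is below 5e-5 and shrinks exponentially, so the where() branch simply returns y for large arguments. A small NumPy check (standalone illustration):

    import numpy as np

    y = np.array([-5.0, 0.0, 5.0, 10.0])
    print(np.log(np.exp(y) + 1.0))            # softplus values
    print(np.log(np.exp(10.0) + 1.0) - 10.0)  # ~4.5e-5, the largest error incurred by the cutoff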
Code Example #7
    def process_batch(self, key, y_pred, y, cache=None):
        _check_input_dimensions(y_pred, y)
        if hasattr(self.model, "_post_process_prediction"):
            y_pred = self.model._post_process_prediction(y_pred, key=key)

        if hasattr(self._model, "quantiles"):
            quantiles = self._model.quantiles
        else:
            quantiles = self.quantiles
            if quantiles is None:
                quantiles = np.linspace(0.05, 0.95, 10)
            y_pred = self.model.posterior_quantiles(
                y_pred=y_pred, quantiles=quantiles, key=key
            )
            if y_pred is None:
                return None

        if self.tensor_backend is None:
            self.tensor_backend = get_tensor_backend(y_pred)
        xp = self.tensor_backend

        # Determine the axes to sum over,
        # excluding the quantile/bin axis.
        axes = list(range(len(y.shape)))
        if hasattr(self.model, "quantile_axis"):
            q_axis = self.model.quantile_axis
        else:
            q_axis = self.model.bin_axis

        del axes[q_axis]

        if self.mask is not None:
            valid_pixels = xp.as_type(y > self.mask, y)
            valid_predictions = valid_pixels * xp.as_type(y <= y_pred, y_pred)

            c = self.calibration.get(key, xp.zeros(len(quantiles), y))
            self.calibration[key] = c + valid_predictions.sum(axes)
            n = self.n_samples.get(key, xp.zeros(len(quantiles), y))
            self.n_samples[key] = n + valid_pixels.sum()
        else:
            valid_predictions = xp.as_type(y <= y_pred, y_pred)

            c = self.calibration.get(key, xp.zeros(len(quantiles), y))
            self.calibration[key] = c + valid_predictions.sum(axes)
            n = self.n_samples.get(key, xp.zeros(len(quantiles), y))
            self.n_samples[key] = n + xp.size(y)
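
The two counters accumulate, per quantile level, how often the observed value falls at or below the predicted quantile; dividing calibration by n_samples after all batches gives the observed coverage, which should match the nominal quantile levels for a calibrated model. A minimal NumPy sketch of that check (standalone, not quantnn code):

    import numpy as np

    taus = np.linspace(0.05, 0.95, 10)                       # nominal quantile levels
    q = np.quantile(np.random.normal(size=100_000), taus)    # the model's predicted quantiles
    y_pred = np.broadcast_to(q, (1000, 10))                  # same prediction for every sample
    y_true = np.random.normal(size=(1000, 1))                # observations

    coverage = (y_true <= y_pred).mean(axis=0)               # observed coverage per level
    print(np.round(coverage, 2))                             # close to taus for a calibrated model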
Code Example #8
File: transformations.py   Project: simonpf/quantnn
    def __call__(self, x):
        """
        Transform tensor.

        Args:
            x: Tensor containing the values to transform.

        Return:
            Tensor containing the transformed values.

        """
        if self.xp is None:
            xp = get_tensor_backend(x)
            self.xp = xp
        else:
            xp = self.xp

        return xp.where(x > 1, x - 1.0, xp.log(x))
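
The forward transform is piecewise: log(x) for x <= 1 and x - 1 for x > 1. Both pieces are zero at x = 1 and have slope one there, so the transform is continuous with a continuous first derivative across the switch point. A quick NumPy check (standalone illustration):

    import numpy as np

    x = np.array([0.5, 0.999, 1.0, 1.001, 2.0])
    t = np.where(x > 1, x - 1.0, np.log(x))
    print(t)   # values pass smoothly through 0 around x = 1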
Code Example #9
    def process_batch(self, key, y_pred, y, cache=None):
        _check_input_dimensions(y_pred, y)
        if hasattr(self.model, "_post_process_prediction"):
            y_pred = self.model._post_process_prediction(y_pred, key=key)

        self.keys.add(key)

        if self.tensor_backend is None:
            self.tensor_backend = get_tensor_backend(y_pred)
        xp = self.tensor_backend

        # Get the posterior mean from the cache,
        # or compute it if it is not cached yet.
        if cache is not None and "y_mean" in cache:
            y_mean = cache["y_mean"]
        else:
            y_mean = self.model.posterior_mean(y_pred=y_pred, key=key)
        if cache is not None:
            cache["y_mean"] = y_mean

        if len(y.shape) > len(y_mean.shape):
            if hasattr(self.model, "quantile_axis"):
                dist_axis = self.model.quantile_axis
            else:
                dist_axis = self.model.bin_axis
            y = y.squeeze(dist_axis)

        dy = y_mean - y

        # Calculate the squared error.
        if self.mask is not None:
            mask = xp.as_type(y > self.mask, y)
            se = self.squared_error.get(key, 0.0)
            self.squared_error[key] = se + ((mask * dy) ** 2).sum()
            n = self.n_samples.get(key, 0.0)
            self.n_samples[key] = n + mask.sum()
        else:
            se = self.squared_error.get(key, 0.0)
            self.squared_error[key] = se + (dy ** 2).sum()
            n = self.n_samples.get(key, 0.0)
            self.n_samples[key] = n + xp.size(y)
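
The two dictionaries implement a streaming mean squared error: squared_error accumulates the sum of (y_mean - y)**2 and n_samples counts the (valid) pixels, so the MSE per key is squared_error / n_samples once all batches have been processed. A minimal NumPy sketch of the pattern (standalone, not quantnn code):

    import numpy as np

    squared_error, n_samples = 0.0, 0
    for _ in range(5):                                   # stand-in for the batch loop
        y_mean = np.random.normal(size=100)              # posterior means
        y = y_mean + 0.1 * np.random.normal(size=100)    # targets
        squared_error += ((y_mean - y) ** 2).sum()
        n_samples += y.size
    print("MSE:", squared_error / n_samples)             # ~0.01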
Code Example #10
File: data.py   Project: simonpf/quantnn
    def aggregate_batches(self, batches):
        """
        Aggregate list of batches.

        Args:
            batches: List of batches to aggregate.

        Return:
            Tuple ``(x, y)`` containing the aggregated inputs and outputs in
            'batches'.
        """
        xs = []
        ys = None
        # Collect batches.
        for x, y in batches:
            xs.append(x)
            if isinstance(y, dict):
                if ys is None:
                    ys = {}
                for k, y_k in y.items():
                    ys.setdefault(k, []).append(y_k)
            else:
                if ys is None:
                    ys = []
                ys.append(y)

        if self.backend is None:
            self.backend = get_tensor_backend(xs[0])

        x = self.backend.concatenate(xs, 0)
        y = utils.apply(lambda y: self.backend.concatenate(y, 0), ys)

        if self.shuffle:
            indices = self._rng.permutation(x.shape[0])
            f = lambda x: x[indices]
            x = f(x)
            y = utils.apply(f, y)
        return x, y
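
aggregate_batches concatenates the inputs along the batch axis, does the same per key when the targets are dictionaries, and applies one shared permutation so inputs and targets stay aligned after shuffling. A condensed NumPy version of the same pattern (standalone, not quantnn code):

    import numpy as np

    batches = [(np.random.rand(4, 3), np.random.rand(4)) for _ in range(3)]
    x = np.concatenate([b[0] for b in batches], axis=0)
    y = np.concatenate([b[1] for b in batches], axis=0)

    indices = np.random.default_rng(0).permutation(x.shape[0])  # one permutation for both
    x, y = x[indices], y[indices]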
Code Example #11
    def process_batch(self, key, y_pred, y, cache=None):
        _check_input_dimensions(y_pred, y)
        if hasattr(self.model, "_post_process_prediction"):
            y_pred = self.model._post_process_prediction(y_pred, key=key)

        qf = self.model.quantile_function(y_pred=y_pred, y=y, key=key)

        if self.tensor_backend is None:
            self.tensor_backend = get_tensor_backend(y_pred)
        xp = self.tensor_backend

        qf = xp.to_numpy(qf)
        if self.mask is not None:
            y = xp.to_numpy(y)
            if hasattr(self.model, "quantile_axis"):
                dist_axis = self.model.quantile_axis
            else:
                dist_axis = self.model.bin_axis
            if len(y.shape) > len(qf.shape):
                y = y.squeeze(dist_axis)
            self.qfs.setdefault(key, []).append(qf[y > self.mask])
        else:
            self.qfs.setdefault(key, []).append(qf.ravel())
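
model.quantile_function evaluates the predictive CDF at the observed value, i.e. the probability integral transform (PIT); for a calibrated probabilistic model these values are uniformly distributed on [0, 1]. A minimal NumPy sketch that recovers PIT values from predicted quantiles by interpolation (standalone, not quantnn's implementation):

    import numpy as np

    taus = np.linspace(0.05, 0.95, 19)                              # nominal quantile levels
    quantiles = np.quantile(np.random.normal(size=100_000), taus)   # predicted quantiles
    y_obs = np.random.normal(size=1000)                             # observations

    pit = np.interp(y_obs, quantiles, taus)                         # CDF value at each observation
    print(np.histogram(pit, bins=5, range=(0, 1))[0])               # roughly flat when calibrated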