Example No. 1
    @property
    def matrix_inv(self):
        # Lazily compute and cache the inverse of the covariance matrix; the
        # `AttributeError` branch covers both a missing and a `None` cache.
        try:
            if self._matrix_inv is None:
                raise AttributeError
        except AttributeError:
            self._matrix_inv = utils.matrix_invert(self.matrix, helper=self._helper)
        return self._matrix_inv
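
The pattern above lazily computes the matrix inverse on first access and caches it on the instance; the `AttributeError` path is taken both when `_matrix_inv` has never been assigned and when it is still `None` (this is also why the later examples read `self.matrix_inv` as a plain attribute). Below is a minimal, self-contained sketch of the same caching idiom; the `CachedInverse` class and its attributes are hypothetical, used only for illustration:

import numpy as np


class CachedInverse:

    def __init__(self, matrix):
        self.matrix = np.asarray(matrix, dtype=float)
        self._matrix_inv = None    # sentinel: inverse not yet computed

    @property
    def matrix_inv(self):
        # Compute the inverse once, on first access; return the cached array afterwards.
        if self._matrix_inv is None:
            self._matrix_inv = np.linalg.inv(self.matrix)
        return self._matrix_inv


# Usage: the inverse is computed only on the first access.
ci = CachedInverse([[2.0, 0.0], [0.0, 4.0]])
print(ci.matrix_inv)
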
Example No. 2
    def density(self, points, data, weights=None, reflect=None, params=None):
        """Calculate the Density Function using this Kernel.

        Arguments
        ---------
        points : (D, N), 2darray of float,
            `N` points at which to evaluate the density function over `D` parameters (dimensions).
            Locations must be specified for each dimension of the data, or for each of the
            target `params` dimensions of the data.

        """
        matrix_inv = self.matrix_inv
        points = np.atleast_2d(points)
        npar_pnts, num_points = np.shape(points)
        norm = self.norm

        # ----------------    Process Arguments and Sanitize
        matrix = self.matrix
        # Select subset of parameters
        if params is not None:
            params = np.atleast_1d(params)
            matrix = matrix[np.ix_(params, params)]
            # Recalculate norm & matrix-inverse
            norm = np.sqrt(np.linalg.det(matrix))
            matrix_inv = utils.matrix_invert(matrix, helper=self._helper)
            if npar_pnts != len(params):
                err = "Dimensions of `points` ({}) does not match `params` ({})!".format(
                    npar_pnts, len(params))
                raise ValueError(err)

        npar_data, num_data = np.shape(data)
        if npar_pnts != npar_data:
            err = "Dimensions of `data` ({}) does not match `points` ({})!".format(
                npar_data, npar_pnts)
            raise ValueError(err)

        if weights is None:
            weights = np.ones(num_data)
        elif (np.shape(weights) != (num_data,)):
            err = "Shape of `weights` ({}) does not match number of data points ({})!".format(
                np.shpae(weights), num_data)
            raise ValueError(err)

        # -----------------    Calculate Density

        norm *= self.distribution.norm(npar_pnts)
        try:
            whitening = sp.linalg.cholesky(matrix_inv)
        except Exception:
            logging.error("Failed to construct cholesky on {}, {}".format(matrix_inv, matrix))
            raise
        kfunc = self.distribution._evaluate
        result = _evaluate_numba(whitening, data, points, weights, kfunc)

        if reflect is None:
            result = result / norm
            return result

        # -------------------   Perform Reflection

        if (len(reflect) != npar_data):
            err = "ERROR: shape of reflect `{}` does not match data `{}`!".format(
                np.shape(reflect), np.shape(data))
            raise ValueError(err)

        for ii, reflect_dim in enumerate(reflect):
            if reflect_dim is None:
                continue

            for loc in reflect_dim:
                if loc is None:
                    continue

                # shape (D,N) i.e. (dimensions, data-points)
                refl_data = np.array(data)
                refl_data[ii, :] = 2*loc - refl_data[ii, :]
                result += _evaluate_numba(whitening, refl_data, points, weights, kfunc)

            lo = -np.inf if (reflect_dim[0] is None) else reflect_dim[0]
            hi = +np.inf if (reflect_dim[1] is None) else reflect_dim[1]
            idx = (points[ii, :] < lo) | (hi < points[ii, :])
            result[idx] = 0.0

        result = result / norm
        return result
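
In this version the per-point summation is delegated to a numba-compiled helper, `_evaluate_numba`, which is not shown here. Judging from the pure-NumPy loop in the next example, it evaluates the kernel on "whitened" coordinates: `whitening = cholesky(matrix_inv)` maps the data covariance onto the identity, so the quadratic form of the kernel reduces to a plain sum of squares. A rough, self-contained sketch of that whitened evaluation for a Gaussian kernel follows (function and variable names are illustrative only, not the package's API):

import numpy as np
import scipy.linalg as spla


def gaussian_kde_whitened(points, data, matrix, weights=None):
    # `points`: (D, M) evaluation locations; `data`: (D, N) samples; `matrix`: (D, D) covariance.
    points = np.atleast_2d(np.asarray(points, dtype=float))
    data = np.atleast_2d(np.asarray(data, dtype=float))
    ndim, num_data = data.shape
    if weights is None:
        weights = np.ones(num_data) / num_data

    matrix_inv = np.linalg.inv(matrix)
    # Upper Cholesky factor U with U^T U = matrix_inv, so |U x|^2 = x^T matrix_inv x.
    whitening = spla.cholesky(matrix_inv)
    white_points = whitening @ points     # (D, M)
    white_data = whitening @ data         # (D, N)

    norm = np.sqrt(np.linalg.det(matrix)) * (2.0 * np.pi) ** (ndim / 2.0)
    result = np.zeros(points.shape[1])
    for ii in range(num_data):
        diff = white_points - white_data[:, ii, np.newaxis]
        result += weights[ii] * np.exp(-0.5 * np.sum(diff**2, axis=0))
    return result / norm

Because the whitened coordinates are uncorrelated, the multivariate Gaussian factorizes into per-dimension terms, which is what keeps the inner loop (and its numba counterpart) simple.
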
Example No. 3
    def density(self, points, data, weights=None, reflect=None, params=None):
        """Calculate the Density Function using this Kernel.

        Arguments
        ---------
        points : (D, N), 2darray of float,
            `N` points at which to evaluate the density function over `D` parameters (dimensions).
            Locations must be specified for each dimension of the data, or for each of the
            target `params` dimensions of the data.

        """
        matrix_inv = self.matrix_inv
        norm = self.norm
        points = np.atleast_2d(points)
        npar_pnts, num_points = np.shape(points)

        # ----------------    Process Arguments

        # Select subset of parameters
        if params is not None:
            params = np.atleast_1d(params)
            matrix = self.matrix[np.ix_(params, params)]
            # Recalculate norm & matrix-inverse
            norm = np.sqrt(np.linalg.det(matrix))
            matrix_inv = utils.matrix_invert(matrix, helper=self._helper)
            if npar_pnts != len(params):
                err = "Dimensions of `points` ({}) does not match `params` ({})!".format(
                    npar_pnts, len(params))
                raise ValueError(err)

        npar_data, num_data = np.shape(data)
        if npar_pnts != npar_data:
            err = "Dimensions of `data` ({}) does not match `points` ({})!".format(
                npar_data, npar_pnts)
            raise ValueError(err)

        if (weights is not None) and (np.shape(weights) != (num_data,)):
            err = "Shape of `weights` ({}) does not match number of data points ({})!".format(
                np.shpae(weights), num_data)
            raise ValueError(err)

        # if (reflect is not None) and (len(reflect) != npar_data):
        #     err = "Length of `reflect` ({}) does not much data dimensions ({})!".format(
        #         len(reflect), npar_data)
        #     raise ValueError(err)
        reflect = _check_reflect(reflect, data, weights=weights)

        # -----------------    Calculate Density

        whitening = sp.linalg.cholesky(matrix_inv)

        # Construct the whitened sampling points
        white_points = np.dot(whitening, points)

        result = np.zeros((num_points,), dtype=float)
        # Construct the 'whitened' (independent) dataset
        white_dataset = np.dot(whitening, data)

        # NOTE: optimize: can the for-loop be sped up?
        if weights is None:
            weights = np.ones(num_data)

        for ii in range(num_data):
            yy = white_points - white_dataset[:, ii, np.newaxis]
            temp = weights[ii] * self.distribution.evaluate(yy)
            result += temp.squeeze()

        if reflect is None:
            result = result / norm
            return result

        # -------------------   Perform Reflection

        for ii, reflect_dim in enumerate(reflect):
            if reflect_dim is None:
                continue

            for loc in reflect_dim:
                if loc is None:
                    continue

                # shape (D,N) i.e. (dimensions, data-points)
                refl_data = np.array(data)
                refl_data[ii, :] = 2*loc - refl_data[ii, :]
                white_dataset = np.dot(whitening, refl_data)
                # Construct the whitened sampling points
                #    shape (D,M) i.e. (dimensions, sample-points)
                points = np.array(points)
                white_points = np.dot(whitening, points)

                if num_points >= num_data:
                    for jj in range(num_data):
                        yy = white_points - white_dataset[:, jj, np.newaxis]
                        result += weights[jj] * self.distribution.evaluate(yy)
                else:
                    for jj in range(num_points):
                        yy = white_dataset - white_points[:, jj, np.newaxis]
                        res = weights * self.distribution.evaluate(yy)
                        result[jj] += np.sum(res, axis=0)

            lo = -np.inf if reflect_dim[0] is None else reflect_dim[0]
            hi = +np.inf if reflect_dim[1] is None else reflect_dim[1]
            idx = (points[ii, :] < lo) | (hi < points[ii, :])
            result[idx] = 0.0

        result = result / norm
        return result
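
The reflection block mirrors the data about each finite boundary (`x -> 2*loc - x`), adds the mirrored contribution to the density, and finally zeroes the result outside `[lo, hi]`; this restores probability mass that would otherwise "leak" across a hard boundary. A small one-dimensional sketch of the same idea (the `reflected_kde_1d` function and its arguments are hypothetical, for illustration only):

import numpy as np


def reflected_kde_1d(points, data, bandwidth, lo=None, hi=None, weights=None):
    # 1D Gaussian KDE with boundary reflection at optional `lo` / `hi` limits.
    points = np.asarray(points, dtype=float)
    data = np.asarray(data, dtype=float)
    if weights is None:
        weights = np.ones(data.size) / data.size

    def _kde(dat):
        # Weighted sum of Gaussian kernels centered on each datum.
        diff = (points[np.newaxis, :] - dat[:, np.newaxis]) / bandwidth
        kern = np.exp(-0.5 * diff**2) / np.sqrt(2.0 * np.pi)
        return np.sum(weights[:, np.newaxis] * kern, axis=0) / bandwidth

    result = _kde(data)
    # Mirror the data about each finite boundary and add that contribution.
    for loc in (lo, hi):
        if loc is not None:
            result += _kde(2.0 * loc - data)

    # Zero the density outside the allowed domain, as in the mask above.
    lo_val = -np.inf if lo is None else lo
    hi_val = +np.inf if hi is None else hi
    result[(points < lo_val) | (points > hi_val)] = 0.0
    return result


# Usage: data that live only on x >= 0, reflected about the lower bound.
samples = np.random.default_rng(0).exponential(size=1000)
grid = np.linspace(0.0, 5.0, 101)
dens = reflected_kde_1d(grid, samples, bandwidth=0.2, lo=0.0)
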