Example #1
    def _integrate_half2(self, name1, name2, **var_map):
        if self.poly.highest_power(name1) != 2 or self.poly.highest_power(
                name2) != 2:
            raise RuntimeError(
                f'Dependency on "{name1}" and "{name2}" must be quadratic.'
            )

        a11 = self.poly.collect_for(Factor(name1, 2))
        a22 = self.poly.collect_for(Factor(name2, 2))
        a12 = self.poly.collect_for(Factor(name1,
                                           1)).collect_for(Factor(name2, 1))
        b1 = self.poly.collect_for(Factor(name1, 1)).reject(name2)
        b2 = self.poly.collect_for(Factor(name2, 1)).reject(name1)
        c = self.poly.reject(name1).reject(name2)

        # Evaluate and scale A.
        a11 = -2 * a11.eval(**var_map)
        a22 = -2 * a22.eval(**var_map)
        a12 = -1 * a12.eval(**var_map)
        b1 = b1.eval(**var_map)
        b2 = b2.eval(**var_map)
        c = c.eval(**var_map)

        # Determinant of A:
        a_det = a11 * a22 - a12**2

        # Inverse of A, which corresponds to variance of distribution after
        # completing the square:
        ia11 = a22 / a_det
        ia12 = -a12 / a_det
        ia22 = a11 / a_det

        # Mean of distribution after completing the square:
        mu1 = ia11 * b1 + ia12 * b2
        mu2 = ia12 * b1 + ia22 * b2

        # Normalise and compute CDF part.
        x1 = -mu1 / safe_sqrt(ia11)
        x2 = -mu2 / safe_sqrt(ia22)
        rho = ia12 / safe_sqrt(ia11 * ia22)

        # Evaluate CDF for all `x1` and `x2`.
        orig_shape = B.shape(mu1)
        num = reduce(operator.mul, orig_shape, 1)
        x1 = B.reshape(x1, num)
        x2 = B.reshape(x2, num)
        rho = rho * B.ones(x1)
        cdf_part = B.reshape(B.bvn_cdf(x1, x2, rho), *orig_shape)

        # Compute exponentiated part.
        quad_form = 0.5 * (ia11 * b1**2 + ia22 * b2**2 + 2 * ia12 * b1 * b2)
        det_part = 2 * B.pi / safe_sqrt(a_det)
        exp_part = det_part * B.exp(quad_form + c)

        return self.const * cdf_part * exp_part
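A note not in the source: this routine completes the square in a bivariate quadratic exponent and reduces an orthant integral to a bivariate normal CDF. Assuming the domain is the negative orthant (which the signs of the CDF arguments suggest), the identity is, with A = [[a11, a12], [a12, a22]], \Sigma = A^{-1}, and \mu = \Sigma b,

    \int_{-\infty}^{0} \int_{-\infty}^{0}
        \exp(-\tfrac{1}{2} x^\top A x + b^\top x + c) \, dx_1 \, dx_2
    = \frac{2\pi}{\sqrt{\det A}}
      \exp(\tfrac{1}{2} b^\top \Sigma b + c)
      \, \Phi_2\!\left(-\frac{\mu_1}{\sqrt{\Sigma_{11}}},
                       -\frac{\mu_2}{\sqrt{\Sigma_{22}}};\ \rho\right),
    \qquad \rho = \frac{\Sigma_{12}}{\sqrt{\Sigma_{11} \Sigma_{22}}},

where \Phi_2 is the standard bivariate normal CDF. The first two factors correspond to `exp_part` (`det_part` times the exponential of `quad_form + c`) and the last to `cdf_part` (`B.bvn_cdf`).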
Example #2
def eig(a, compute_eigvecs=True):
    if compute_eigvecs:
        vals, vecs = B.eig(a, compute_eigvecs=True)
        vals = B.flatten(vals)
        if B.rank(vecs) == 3:
            vecs = B.transpose(vecs, perm=(1, 0, 2))
            vecs = B.reshape(vecs, 3, -1)
        order = compute_order(vals)
        return B.take(vals, order), B.abs(B.take(vecs, order, axis=1))
    else:
        vals = B.flatten(B.eig(a, compute_eigvecs=False))
        return B.take(vals, compute_order(vals))
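`compute_order` is not included in this extract. A minimal, hypothetical stand-in, assuming real eigenvalues (e.g. for symmetric inputs) and that the helper simply returns indices sorting them ascending; the project's actual implementation may differ:

    import numpy as np

    def compute_order(vals):
        # Indices that sort the eigenvalues in ascending order.
        return np.argsort(vals)

For NumPy inputs this composes directly with `B.take(vals, order)` above.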
Example #3
    def get(self, shape):
        """Get a batch of tensor of a particular shape.

        Args:
            shape (shape): Shape of tensor.

        Returns:
            tensor: Batch of tensors, each of shape `shape`.
        """
        length = reduce(mul, shape, 1)
        res = self.source[:, self.index:self.index + length]
        self.index += length
        return B.reshape(res, -1, *shape)
Example #4
    def unpack(self, package):
        """Unpack vector.

        Args:
            package (tensor): Vector to unpack.

        Returns:
            list[tensor]: Original objects.
        """
        i, outs = 0, []
        for shape, length in zip(self._shapes, self._lengths):
            outs.append(B.reshape(package[i:i + length], *shape))
            i += length
        return outs
Example #5
File: util.py Project: wesselb/varz
def unpack(package: B.Numeric, *shapes):
    """Unpack vector.

    Args:
        package (tensor): Tensor to unpack.
        *shapes (shape): Shapes of objects to unpack.

    Returns:
        list[tensor]: Original objects.
    """
    if B.rank(package) != 1:
        raise ValueError("Package must be a vector.")

    # Unpack package.
    lengths = [reduce(mul, shape, 1) for shape in shapes]
    i, outs = 0, []
    for length, shape in zip(lengths, shapes):
        outs.append(B.reshape(package[i : i + length], *shape))
        i += length
    return outs
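A quick usage sketch for this function, with shapes chosen purely for illustration:

    x = B.randn(7)                  # 1 * 2 + 5 = 7 elements in total
    a, b = unpack(x, (1, 2), (5,))  # a has shape (1, 2); b has shape (5,)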
Example #6
def _reshape_cols(a, *indices):
    return B.transpose(B.reshape(B.transpose(a), *reversed(indices)))
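A note not in the source on what the double transpose achieves: `B.reshape` fills in row-major order, so transposing before and after, with the target shape reversed, is equivalent to a column-major (Fortran-order) reshape. A NumPy check of the equivalence:

    import numpy as np

    a = np.array([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)
    out = np.transpose(np.reshape(np.transpose(a), (3, 2)))
    assert (out == np.reshape(a, (2, 3), order="F")).all()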
Example #7
def reshape(a: Dense, rows: B.Int, cols: B.Int):
    return Dense(B.reshape(a.mat, rows, cols))
Example #8
def reshape(a: AbstractMatrix, rows: B.Int, cols: B.Int):
    warn_upmodule(f"Converting {a} to dense for reshaping.",
                  category=ToDenseWarning)
    return Dense(B.reshape(B.dense(a), rows, cols))
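Examples #7 and #8 read as multiple-dispatch methods (the dispatch decorators were likely lost in extraction): #7 reshapes a `Dense` matrix while keeping it structured, and #8 is the generic fallback that densifies any `AbstractMatrix` first, emitting a warning. A sketch of how this would be used, assuming the snippets come from a structured-matrix package that registers these methods with `B` (the import path is an assumption):

    import lab as B
    import numpy as np
    from matrix import Dense  # assumed import path

    d = Dense(np.random.randn(6, 2))
    B.reshape(d, 3, 4)  # hits the `Dense` method; the result stays a `Dense`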
Example #9
def _reshape(a):
    rows, cols = B.shape(a)
    return B.reshape(a, rows * cols, -1)
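With the trailing `-1`, this flattens a matrix into a single column. For illustration:

    a = B.randn(3, 2)
    B.shape(_reshape(a))  # (6, 1)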
Example #10
File: test_util.py Project: wesselb/wbml
def test_batchvars():
    source = B.randn(5, 2 + 3 * 4)
    vs = BatchVars(source=source)
    approx(vs.get(shape=(1, 2)), B.reshape(source[:, :2], 5, 1, 2))
    approx(vs.get(shape=(3, 4)), B.reshape(source[:, 2:], 5, 3, 4))
Example #11
    def _get_var(self,
                 transform,
                 inverse_transform,
                 init,
                 generate_init,
                 shape,
                 dtype,
                 name):
        # If the name already exists, return that variable.
        try:
            return self[name]
        except KeyError:
            pass

        # A new variable will be added. Clear lookup cache.
        self._get_vars_cache.clear()

        # Resolve data type.
        dtype = self.dtype if dtype is None else dtype

        # If no source is provided, get the latent from the provided
        # initialiser.
        if self.source is None:
            # Resolve initialisation.
            if init is None:
                init = generate_init(shape=shape, dtype=dtype)
            else:
                init = B.cast(dtype, init)

            # Construct optimisable variable.
            latent = inverse_transform(init)
            if isinstance(self.dtype, B.TFDType):
                latent = tf.Variable(latent)
            elif isinstance(self.dtype, B.TorchDType):
                pass  # All is good in this case.
            else:
                # Must be a NumPy data type.
                assert isinstance(self.dtype, B.NPDType)
                latent = np.array(latent)
        else:
            # Get the latent variable from the source.
            length = reduce(mul, shape, 1)
            latent_flat = self.source[
                self.source_index:self.source_index + length
            ]
            self.source_index += length

            # Cast to the right data type.
            latent = B.cast(dtype, B.reshape(latent_flat, *shape))

        # Store transforms.
        self.vars.append(latent)
        self.transforms.append(transform)
        self.inverse_transforms.append(inverse_transform)

        # Get index of the variable.
        index = len(self.vars) - 1

        # Store name if given.
        if name is not None:
            self.name_to_index[name] = index

        # Generate the variable and return.
        return transform(latent)
Example #12
    def _get_var(
        self,
        transform,
        inverse_transform,
        init,
        generate_init,
        shape,
        shape_latent,
        dtype,
        name,
    ):
        # If the name already exists, return that variable.
        try:
            return self[name]
        except KeyError:
            pass

        # A new variable will be added. Clear lookup cache.
        self._get_latent_vars_cache.clear()

        # Resolve data type.
        dtype = self._resolve_dtype(dtype)

        # If no source is provided, get the latent from the provided
        # initialiser.
        if self.source is None:
            # Resolve initialisation.
            if init is None:
                init = generate_init(shape=shape, dtype=dtype)
            else:
                init = B.cast(dtype, init)

            # Ensure that the initialisation is on the right device.
            init = B.to_active_device(init)

            # Allow broadcasting in the initialisation.
            if shape is not None:
                init = init * B.ones(B.dtype(init), *shape)

            # Double check the shape of the initialisation.
            if shape is not None and Shape(*shape) != Shape(*B.shape(init)):
                raise ValueError(
                    f"Shape of initial value {B.shape(init)} is not equal to the "
                    f"desired shape {shape}.")

            # Construct optimisable variable.
            latent = inverse_transform(init)
            if isinstance(self.dtype, B.TFDType):
                latent = tf.Variable(latent)
            elif isinstance(self.dtype, B.TorchDType):
                pass  # All is good in this case.
            elif isinstance(self.dtype, B.JAXDType):
                latent = jnp.array(latent)
            else:
                # Must be a NumPy data type.
                assert isinstance(self.dtype, B.NPDType)
                latent = np.array(latent)
        else:
            # Get the latent variable from the source.
            length = reduce(mul, shape_latent, 1)
            latent_flat = self.source[
                self.source_index : self.source_index + length
            ]
            self.source_index += length

            # Cast to the right data type.
            latent = B.cast(dtype, B.reshape(latent_flat, *shape_latent))

        # Store transforms.
        self.vars.append(latent)
        self.transforms.append(transform)
        self.inverse_transforms.append(inverse_transform)

        # Get index of the variable.
        index = len(self.vars) - 1

        # Store name if given.
        if name is not None:
            self.name_to_index[name] = index

        # Generate the variable and return.
        return transform(latent)
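For context, a minimal sketch of how this method is typically reached in varz; the public API here is assumed from the project's README and may differ between versions:

    import numpy as np
    from varz import Vars

    vs = Vars(np.float64)
    # Both lines route through `_get_var`: the first call creates the
    # variable and caches it under its name; the second retrieves it.
    x = vs.positive(shape=(2,), name="x")
    y = vs["x"]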